/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file implements the ULE scheduler.  ULE supports independent CPU
 * run queues and fine-grained locking.  It has superior interactive
 * performance under load even on uniprocessor systems.
 *
 * etymology:
 *   ULE is the last three letters in schedule.  It owes its name to a
 * generic user created for a scheduling system by Paul Mikesell at
 * Isilon Systems and a general lack of creativity on the part of the author.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#ifndef PREEMPTION
#error "SCHED_ULE requires options PREEMPTION"
#endif

#define	KTR_ULE	0

/*
 * Thread scheduler specific section.  All fields are protected
 * by the thread lock.
 */
struct td_sched {
	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
	struct thread	*ts_thread;	/* Active associated thread. */
	struct runq	*ts_runq;	/* Run-queue we're queued on. */
	short		ts_flags;	/* TSF_* flags. */
	u_char		ts_rqindex;	/* Run queue index. */
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	int		ts_slptick;	/* Tick when we went to sleep. */
	int		ts_slice;	/* Ticks of slice remaining. */
	u_int		ts_slptime;	/* Number of ticks we vol. slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	/* The following variables are only used for pctcpu calculation */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
#ifdef SMP
	int		ts_rltick;	/* Real last tick, for affinity. */
#endif
};
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread can not migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

static struct td_sched td_sched0;

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
#define	SCHED_PRI_TICKS(ts)						\
	(SCHED_TICK_HZ((ts)) /						\
	    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)
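
/*
 * Worked example (a sketch; assumes hz = 1000, so SCHED_TICK_TARG is
 * 10000): a thread that ran for the whole averaging window has
 * ts_ticks ~= SCHED_TICK_TARG << SCHED_TICK_SHIFT and
 * SCHED_TICK_TOTAL() ~= 10000, so SCHED_PRI_TICKS() evaluates to roughly
 * SCHED_PRI_RANGE, the full utilization penalty.  A thread that ran for
 * only 10% of the window gets roughly SCHED_PRI_RANGE / 10.  The final
 * timeshare priority is then approximately
 * SCHED_PRI_MIN + SCHED_PRI_TICKS(ts) + SCHED_PRI_NICE(nice).
 */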

/*
 * These determine the interactivity of a process.  Interactivity differs
 * from cpu utilization in that it expresses the voluntary time slept vs
 * time ran while cpu utilization includes all time not running.  This more
 * accurately models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice;
static int preempt_thresh = PRI_MIN_KERN;
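
/*
 * A sketch of how the threshold is applied (the actual scaling lives in
 * sched_interact_score() later in this file): scores are arranged so that
 * threads sleeping more than they run fall below SCHED_INTERACT_HALF,
 * roughly SCHED_INTERACT_HALF * run / sleep, and threads running more
 * than they sleep land above it.  A shell sleeping 9 ticks for every tick
 * it runs thus scores about 50 / 9 ~= 5, well under sched_interact (30),
 * and is treated as interactive; a cpu hog with the inverse ratio scores
 * near 95 and is not.
 */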

/*
 * tdq - per processor runqs and statistics.  All fields are protected by
 * the tdq_lock.  The load and lowpri may be accessed without the lock to
 * avoid excess locking in sched_pickcpu().
 */
struct tdq {
	struct mtx	*tdq_lock;	/* Pointer to group lock. */
	struct runq	tdq_realtime;	/* real-time run queue. */
	struct runq	tdq_timeshare;	/* timeshare run queue. */
	struct runq	tdq_idle;	/* Queue of IDLE threads. */
	int		tdq_load;	/* Aggregate load. */
	u_char		tdq_idx;	/* Current insert index. */
	u_char		tdq_ridx;	/* Current removal index. */
#ifdef SMP
	u_char		tdq_lowpri;	/* Lowest priority thread. */
	int		tdq_transferable; /* Transferable thread count. */
	LIST_ENTRY(tdq)	tdq_siblings;	/* Next in tdq group. */
	struct tdq_group *tdq_group;	/* Our processor group. */
#else
	int		tdq_sysload;	/* For loadavg, !ITHD load. */
#endif
} __aligned(64);

#ifdef SMP
/*
 * tdq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not
 * NUMA.  In a NUMA environment we'd want an idle bitmap per group and a
 * two tiered load balancer.
 */
struct tdq_group {
	struct mtx	tdg_lock;	/* Protects all fields below. */
	int		tdg_cpus;	/* Count of CPUs in this tdq group. */
	cpumask_t	tdg_cpumask;	/* Mask of cpus in this group. */
	cpumask_t	tdg_idlemask;	/* Idle cpus in this group. */
	cpumask_t	tdg_mask;	/* Bit mask for first cpu. */
	int		tdg_load;	/* Total load of this group. */
	int		tdg_transferable; /* Transferable load of this group. */
	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
	char		tdg_name[16];	/* lock name. */
} __aligned(64);

#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 300))
#define	SCHED_AFFINITY(ts)	((ts)->ts_rltick > ticks - affinity)

/*
 * Run-time tunables.
 */
static int rebalance = 1;
static int balance_secs = 1;
static int pick_pri = 1;
static int affinity;
static int tryself = 1;
static int steal_htt = 0;
static int steal_idle = 1;
static int steal_thresh = 2;
static int topology = 0;
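
/*
 * Example (numbers assume hz = 1000, and that the affinity tunable is
 * initialized from SCHED_AFFINITY_DEFAULT during setup):
 * SCHED_AFFINITY_DEFAULT works out to max(1, 1000 / 300) = 3, so
 * SCHED_AFFINITY(ts) holds only if the thread last ran within the
 * previous 3 clock ticks.  Raising affinity widens the window in which
 * sched_pickcpu() prefers the thread's previous cpu.
 */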

/*
 * One thread queue per processor.
 */
static volatile cpumask_t tdq_idle;
static int tdg_maxid;
static struct tdq	tdq_cpu[MAXCPU];
static struct tdq_group tdq_groups[MAXCPU];
static struct callout balco;
static struct callout gbalco;

#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
#define	TDQ_ID(x)	((int)((x) - tdq_cpu))
#define	TDQ_GROUP(x)	(&tdq_groups[(x)])
#define	TDG_ID(x)	((int)((x) - tdq_groups))
#else	/* !SMP */
static struct tdq	tdq_cpu;
static struct mtx	tdq_lock;

#define	TDQ_ID(x)	(0)
#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
#endif

#define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
#define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCKPTR(t)		((t)->tdq_lock)
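
/*
 * A sketch of the locking idiom these macros encode (hypothetical caller,
 * not code from this file):
 *
 *	struct tdq *tdq;
 *
 *	tdq = TDQ_SELF();
 *	TDQ_LOCK(tdq);
 *	... manipulate tdq_load and the per-cpu run queues ...
 *	TDQ_UNLOCK(tdq);
 *
 * On SMP tdq_lock points into the tdq_group, so all CPUs sharing a group
 * share one recursive spin mutex; TDQ_LOCKPTR() hides that indirection.
 */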

static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);

/* Operations on per processor queues */
static struct td_sched * tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct td_sched *);
static void tdq_load_rem(struct tdq *, struct td_sched *);
static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
static void tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static void tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct td_sched *);
static struct td_sched *tdq_steal(struct tdq *, int);
static struct td_sched *runq_steal(struct runq *);
static int sched_pickcpu(struct td_sched *, int);
static void sched_balance(void *);
static void sched_balance_groups(void *);
static void sched_balance_group(struct tdq_group *);
static void sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
static inline struct mtx *thread_block_switch(struct thread *);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);

#define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)

/*
 * Print the threads waiting on a run-queue.
 */
static void
runq_print(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;
	int j;
	int i;

	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(ts, rqh, ts_procq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    ts->ts_thread,
					    ts->ts_thread->td_proc->p_comm,
					    ts->ts_thread->td_priority,
					    ts->ts_rqindex, pri);
				}
			}
	}
}

/*
 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
 */
void
tdq_print(int cpu)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);

	printf("tdq %d:\n", TDQ_ID(tdq));
	printf("\tlockptr %p\n", TDQ_LOCKPTR(tdq));
	printf("\tload: %d\n", tdq->tdq_load);
	printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
	printf("\trealtime runq:\n");
	runq_print(&tdq->tdq_realtime);
	printf("\ttimeshare runq:\n");
	runq_print(&tdq->tdq_timeshare);
	printf("\tidle runq:\n");
	runq_print(&tdq->tdq_idle);
#ifdef SMP
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
	printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
	printf("\tgroup: %d\n", TDG_ID(tdq->tdq_group));
	printf("\tLock name: %s\n", tdq->tdq_group->tdg_name);
#endif
}
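
/*
 * Usage note: until tdq_print() is wired up as a proper "show" command it
 * can still be reached from the debugger with ddb's generic function-call
 * syntax, e.g. "call tdq_print(0)" to dump cpu 0's queues.
 */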

#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
/*
 * Add a thread to the actual run-queue.  Keeps transferable counts up to
 * date with what is actually on the run-queue.  Selects the correct
 * queue position for timeshare threads.
 */
static __inline void
tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
{
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
#ifdef SMP
	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
		tdq->tdq_transferable++;
		tdq->tdq_group->tdg_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
	}
#endif
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		u_char pri;

		pri = ts->ts_thread->td_priority;
		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
		    ("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between MIN and MAX
		 * timeshare.  Use the whole queue to represent these values.
		 */
		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
			pri = (pri + tdq->tdq_idx) % RQ_NQS;
			/*
			 * This effectively shortens the queue by one so we
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (unsigned char)(pri - 1) % RQ_NQS;
		} else
			pri = tdq->tdq_ridx;
		runq_add_pri(ts->ts_runq, ts, pri, flags);
	} else
		runq_add(ts->ts_runq, ts, flags);
}

/*
 * Remove a thread from a run-queue.  This typically happens when a thread
 * is selected to run.  Running threads are not on the queue and the
 * transferable count does not reflect them.
 */
static __inline void
tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
{
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(ts->ts_runq != NULL,
	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
#ifdef SMP
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		tdq->tdq_group->tdg_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
	}
#endif
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
		else
			runq_remove_idx(ts->ts_runq, ts, NULL);
		/*
		 * For timeshare threads we update the priority here so
		 * the priority reflects the time we've been sleeping.
		 */
		ts->ts_ltick = ticks;
		sched_pctcpu_update(ts);
		sched_priority(ts->ts_thread);
	} else
		runq_remove(ts->ts_runq, ts);
}
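
/*
 * A worked example of the circular insert in tdq_runq_add() (assuming the
 * usual RQ_NQS of 64 and a 64-priority timeshare range, so TS_RQ_PPQ is
 * 1): the best timeshare priority maps to offset 0 and is inserted at
 * tdq_idx itself, while the worst maps to offset 63, one slot behind the
 * removal index.  As tdq_ridx sweeps around the ring, each queue it
 * passes drains completely before worse priorities are reached, which
 * bounds the latency of any timeshare thread.
 */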

/*
 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
 * for this thread to the referenced thread queue.
 */
static void
tdq_load_add(struct tdq *tdq, struct td_sched *ts)
{
	int class;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	tdq->tdq_load++;
	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		tdq->tdq_group->tdg_load++;
#else
		tdq->tdq_sysload++;
#endif
}

/*
 * Remove the load from a thread that is transitioning to a sleep state or
 * exiting.
 */
static void
tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
{
	int class;

	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		tdq->tdq_group->tdg_load--;
#else
		tdq->tdq_sysload--;
#endif
	KASSERT(tdq->tdq_load != 0,
	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
	tdq->tdq_load--;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
	ts->ts_runq = NULL;
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some threads.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much
 * at once can have an unpleasant effect on the system.  The scheduler
 * rarely has enough information to make perfect decisions.  So this
 * algorithm chooses simplicity and more gradual effects on load in larger
 * systems.
 */
static void
sched_balance(void *arg)
{
	struct tdq_group *high;
	struct tdq_group *low;
	struct tdq_group *tdg;
	int cnt;
	int i;

	callout_reset(&balco, max(hz / 2, random() % (hz * balance_secs)),
	    sched_balance, NULL);
	if (smp_started == 0 || rebalance == 0)
		return;
	low = high = NULL;
	i = random() % (tdg_maxid + 1);
	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
		tdg = TDQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || tdg->tdg_load > high->tdg_load)
		    && tdg->tdg_transferable)
			high = tdg;
		if (low == NULL || tdg->tdg_load < low->tdg_load)
			low = tdg;
		if (++i > tdg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->tdg_members),
		    LIST_FIRST(&low->tdg_members));
}

/*
 * Balance load between CPUs within each group.  Will only migrate within
 * a group.
 */
static void
sched_balance_groups(void *arg)
{
	int i;

	callout_reset(&gbalco, max(hz / 2, random() % (hz * balance_secs)),
	    sched_balance_groups, NULL);
	if (smp_started == 0 || rebalance == 0)
		return;
	for (i = 0; i <= tdg_maxid; i++)
		sched_balance_group(TDQ_GROUP(i));
}
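
/*
 * A note on the interval arithmetic in the callout_reset() calls above:
 * with the default balance_secs of 1 and hz = 1000, random() %
 * (hz * balance_secs) is uniform over [0, 1000) and the max() clamps the
 * period to at least hz / 2, so a pass runs every 500 to 999 ticks.  The
 * jitter keeps the two balancer callouts from synchronizing into
 * periodic spikes.
 */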

/*
 * Finds the greatest imbalance between two tdqs in a group.
 */
static void
sched_balance_group(struct tdq_group *tdg)
{
	struct tdq *tdq;
	struct tdq *high;
	struct tdq *low;
	int load;

	if (tdg->tdg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
		load = tdq->tdq_load;
		if (high == NULL || load > high->tdq_load)
			high = tdq;
		if (low == NULL || load < low->tdq_load)
			low = tdq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

/*
 * Lock two thread queues using their address to maintain lock order.
 */
static void
tdq_lock_pair(struct tdq *one, struct tdq *two)
{
	if (one < two) {
		TDQ_LOCK(one);
		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
	} else {
		TDQ_LOCK(two);
		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
	}
}
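
/*
 * Note: taking the pair in ascending address order is the classic
 * deadlock-avoidance discipline -- two cpus balancing against each other
 * always contend on the same first lock instead of each holding one and
 * spinning on the other.  MTX_DUPOK is required because the two tdqs'
 * locks share a class and, within one group, may even resolve to the
 * same recursive spin lock.
 */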

/*
 * Transfer load between two imbalanced thread queues.
 */
static void
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	tdq_lock_pair(high, low);
	/*
	 * If we're transferring within a group we have to use this specific
	 * tdq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->tdq_group == low->tdq_group) {
		transferable = high->tdq_transferable;
		high_load = high->tdq_load;
		low_load = low->tdq_load;
	} else {
		transferable = high->tdq_group->tdg_transferable;
		high_load = high->tdq_group->tdg_load;
		low_load = low->tdq_group->tdg_load;
	}
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
	 */
	if (transferable != 0) {
		diff = high_load - low_load;
		move = diff / 2;
		if (diff & 0x1)
			move++;
		move = min(move, transferable);
		for (i = 0; i < move; i++)
			tdq_move(high, low);
	}
	TDQ_UNLOCK(high);
	TDQ_UNLOCK(low);
	return;
}

/*
 * Move a thread from one thread queue to another.
 */
static void
tdq_move(struct tdq *from, struct tdq *to)
{
	struct td_sched *ts;
	struct thread *td;
	struct tdq *tdq;
	int cpu;

	tdq = from;
	cpu = TDQ_ID(to);
	ts = tdq_steal(tdq, 1);
	if (ts == NULL) {
		struct tdq_group *tdg;

		tdg = tdq->tdq_group;
		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
			if (tdq == from || tdq->tdq_transferable == 0)
				continue;
			ts = tdq_steal(tdq, 1);
			break;
		}
		if (ts == NULL)
			return;
	}
	if (tdq == to)
		return;
	td = ts->ts_thread;
	/*
	 * Although the run queue is locked the thread may be blocked.  Lock
	 * it to clear this.
	 */
	thread_lock(td);
	/* Drop recursive lock on from. */
	TDQ_UNLOCK(from);
	sched_rem(td);
	ts->ts_cpu = cpu;
	td->td_lock = TDQ_LOCKPTR(to);
	tdq_add(to, td, SRQ_YIELDING);
	tdq_notify(ts);
}
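
/*
 * A worked example of the move computation in sched_balance_pair(): with
 * high_load = 7 and low_load = 2, diff = 5 and move rounds up to 3,
 * leaving a 4/5 split.  The min() against transferable then caps the
 * count at what may legally migrate, since pinned and bound threads must
 * stay put.
 */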

/*
 * This tdq has idled.  Try to steal a thread from another cpu and switch
 * to it.
 */
static int
tdq_idled(struct tdq *tdq)
{
	struct tdq_group *tdg;
	struct tdq *steal;
	struct td_sched *ts;
	struct thread *td;
	int highload;
	int highcpu;
	int load;
	int cpu;

	/* We don't want to be preempted while we're iterating over tdqs */
	spinlock_enter();
	tdg = tdq->tdq_group;
	/*
	 * If we're in a cpu group, try and steal threads from another cpu in
	 * the group before idling.
	 */
	if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
			if (steal == tdq || steal->tdq_transferable == 0)
				continue;
			TDQ_LOCK(steal);
			ts = tdq_steal(steal, 0);
			if (ts)
				goto steal;
			TDQ_UNLOCK(steal);
		}
	}
	for (;;) {
		if (steal_idle == 0)
			break;
		highcpu = 0;
		highload = 0;
		for (cpu = 0; cpu <= mp_maxid; cpu++) {
			if (CPU_ABSENT(cpu))
				continue;
			steal = TDQ_CPU(cpu);
			load = TDQ_CPU(cpu)->tdq_transferable;
			if (load < highload)
				continue;
			highload = load;
			highcpu = cpu;
		}
		if (highload < steal_thresh)
			break;
		steal = TDQ_CPU(highcpu);
		TDQ_LOCK(steal);
		if (steal->tdq_transferable >= steal_thresh &&
		    (ts = tdq_steal(steal, 1)) != NULL)
			goto steal;
		TDQ_UNLOCK(steal);
		break;
	}
	spinlock_exit();
	return (1);
steal:
	td = ts->ts_thread;
	thread_lock(td);
	spinlock_exit();
	MPASS(td->td_lock == TDQ_LOCKPTR(steal));
	TDQ_UNLOCK(steal);
	sched_rem(td);
	sched_setcpu(ts, PCPU_GET(cpuid), SRQ_YIELDING);
	tdq_add(tdq, td, SRQ_YIELDING);
	MPASS(td->td_lock == curthread->td_lock);
	mi_switch(SW_VOL, NULL);
	thread_unlock(curthread);

	return (0);
}
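
/*
 * A note on steal_thresh: with the default of 2, an idling cpu only
 * steals from a queue holding at least two transferable threads;
 * stealing a lone runnable thread would mostly just bounce it between
 * cpus and defeat affinity without improving throughput.
 */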

/*
 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
 */
static void
tdq_notify(struct td_sched *ts)
{
	struct thread *ctd;
	struct pcpu *pcpu;
	int cpri;
	int pri;
	int cpu;

	cpu = ts->ts_cpu;
	pri = ts->ts_thread->td_priority;
	pcpu = pcpu_find(cpu);
	ctd = pcpu->pc_curthread;
	cpri = ctd->td_priority;

	/*
	 * If our priority is not better than the current priority there is
	 * nothing to do.
	 */
	if (pri > cpri)
		return;
	/*
	 * Always IPI idle.
	 */
	if (cpri > PRI_MIN_IDLE)
		goto sendipi;
	/*
	 * If we're realtime or better and there is timeshare or worse running
	 * send an IPI.
	 */
	if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
		goto sendipi;
	/*
	 * Otherwise only IPI if we exceed the threshold.
	 */
	if (pri > preempt_thresh)
		return;
sendipi:
	ctd->td_flags |= TDF_NEEDRESCHED;
	ipi_selected(1 << cpu, IPI_PREEMPT);
}
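
/*
 * An example of the filter above (lower numbers mean more important): a
 * wakeup onto a cpu whose current thread is idle-class always IPIs; a
 * realtime wakeup IPIs a cpu running timeshare work; but a timeshare
 * thread waking onto a busy timeshare cpu is only queued, since its
 * priority is numerically above the default preempt_thresh of
 * PRI_MIN_KERN, and it waits for the next scheduling point.
 */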

/*
 * Steals load from a timeshare queue.  Honors the rotating queue head
 * index.
 */
static struct td_sched *
runq_steal_from(struct runq *rq, u_char start)
{
	struct td_sched *ts;
	struct rqbits *rqb;
	struct rqhead *rqh;
	int first;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW - 1);
	pri = 0;
	first = 0;
again:
	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
		if (rqb->rqb_bits[i] == 0)
			continue;
		if (bit != 0) {
			for (pri = bit; pri < RQB_BPW; pri++)
				if (rqb->rqb_bits[i] & (1ul << pri))
					break;
			if (pri >= RQB_BPW)
				continue;
		} else
			pri = RQB_FFS(rqb->rqb_bits[i]);
		pri += (i << RQB_L2BPW);
		rqh = &rq->rq_queues[pri];
		TAILQ_FOREACH(ts, rqh, ts_procq) {
			if (first && THREAD_CAN_MIGRATE(ts->ts_thread))
				return (ts);
			first = 1;
		}
	}
	if (start != 0) {
		start = 0;
		goto again;
	}

	return (NULL);
}
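
/*
 * A note on the scan above: the search starts at the rotating head
 * passed in (tdq_ridx) and wraps back to bit 0, so candidates are
 * visited in the order the victim cpu would run them.  The "first" flag
 * appears to deliberately skip the first thread encountered, leaving the
 * victim's next-to-run thread on its current cpu.
 */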

/*
 * Steals load from a standard linear queue.
 */
static struct td_sched *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct td_sched *ts;
	int word;
	int bit;

	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ts, rqh, ts_procq)
				if (THREAD_CAN_MIGRATE(ts->ts_thread))
					return (ts);
		}
	}
	return (NULL);
}

/*
 * Attempt to steal a thread in priority order from a thread queue.
 */
static struct td_sched *
tdq_steal(struct tdq *tdq, int stealidle)
{
	struct td_sched *ts;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
		return (ts);
	if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
		return (ts);
	if (stealidle)
		return (runq_steal(&tdq->tdq_idle));
	return (NULL);
}

/*
 * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
 * current lock and returns with the assigned queue locked.  If this is
 * via sched_switch() we leave the thread in a blocked state as an
 * optimization.
 */
static inline struct tdq *
sched_setcpu(struct td_sched *ts, int cpu, int flags)
{
	struct thread *td;
	struct tdq *tdq;

	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);

	tdq = TDQ_CPU(cpu);
	td = ts->ts_thread;
	ts->ts_cpu = cpu;

	/* If the lock matches just return the queue. */
	if (td->td_lock == TDQ_LOCKPTR(tdq))
		return (tdq);
#ifdef notyet
	/*
	 * If the thread isn't running, its lockptr is a
	 * turnstile or a sleepqueue.  We can just lock_set without
	 * blocking.
	 */
	if (TD_CAN_RUN(td)) {
		TDQ_LOCK(tdq);
		thread_lock_set(td, TDQ_LOCKPTR(tdq));
		return (tdq);
	}
#endif
	/*
	 * The hard case, migration, we need to block the thread first to
	 * prevent order reversals with other cpus' locks.
	 */
	thread_lock_block(td);
	TDQ_LOCK(tdq);
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
	return (tdq);
}
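
/*
 * A note on the hard case above: thread_lock_block() points td_lock at a
 * special blocked-lock state and drops the old spin lock before the
 * destination queue's lock is taken, so no ordering between the two
 * queue locks is ever established.  Concurrent thread_lock() callers
 * simply spin until thread_lock_unblock() publishes the new lock
 * pointer.
 */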

/*
 * Find the thread queue running the lowest priority thread.
 */
static int
tdq_lowestpri(void)
{
	struct tdq *tdq;
	int lowpri;
	int lowcpu;
	int lowload;
	int load;
	int cpu;
	int pri;

	lowload = 0;
	lowpri = lowcpu = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		tdq = TDQ_CPU(cpu);
		pri = tdq->tdq_lowpri;
		load = TDQ_CPU(cpu)->tdq_load;
		CTR4(KTR_ULE,
		    "cpu %d pri %d lowcpu %d lowpri %d",
		    cpu, pri, lowcpu, lowpri);
		if (pri < lowpri)
			continue;
		if (lowpri && lowpri == pri && load > lowload)
			continue;
		lowpri = pri;
		lowcpu = cpu;
		lowload = load;
	}

	return (lowcpu);
}

/*
 * Find the thread queue with the least load.
 */
static int
tdq_lowestload(void)
{
	struct tdq *tdq;
	int lowload;
	int lowpri;
	int lowcpu;
	int load;
	int cpu;
	int pri;

	lowcpu = 0;
	lowload = TDQ_CPU(0)->tdq_load;
	lowpri = TDQ_CPU(0)->tdq_lowpri;
	for (cpu = 1; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		tdq = TDQ_CPU(cpu);
		load = tdq->tdq_load;
		pri = tdq->tdq_lowpri;
		CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d",
		    cpu, load, lowcpu, lowload);
		if (load > lowload)
			continue;
		if (load == lowload && pri < lowpri)
			continue;
		lowcpu = cpu;
		lowload = load;
		lowpri = pri;
	}

	return (lowcpu);
}
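
/*
 * Note: these two searches are the final fallbacks of sched_pickcpu()
 * below.  tdq_lowestpri() prefers the cpu running the least important
 * work and breaks ties by load; tdq_lowestload() inverts that ordering.
 * The pick_pri tunable selects between them.
 */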

/*
 * Pick the destination cpu for sched_add().  Respects affinity and makes
 * a determination based on load or priority of available processors.
 */
static int
sched_pickcpu(struct td_sched *ts, int flags)
{
	struct tdq *tdq;
	int self;
	int pri;
	int cpu;

	cpu = self = PCPU_GET(cpuid);
	if (smp_started == 0)
		return (self);
	/*
	 * Don't migrate a running thread from sched_switch().
	 */
	if (flags & SRQ_OURSELF) {
		CTR1(KTR_ULE, "YIELDING %d",
		    curthread->td_priority);
		return (self);
	}
	pri = ts->ts_thread->td_priority;
	cpu = ts->ts_cpu;
	/*
	 * Regardless of affinity, if the last cpu is idle send it there.
	 */
	tdq = TDQ_CPU(cpu);
	if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
		CTR5(KTR_ULE,
		    "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
		    tdq->tdq_lowpri);
		return (ts->ts_cpu);
	}
	/*
	 * If we have affinity, try to place it on the cpu we last ran on.
	 */
	if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) {
		CTR5(KTR_ULE,
		    "affinity for %d, ltick %d ticks %d pri %d curthread %d",
		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
		    tdq->tdq_lowpri);
		return (ts->ts_cpu);
	}
	/*
	 * Look for an idle group.
	 */
	CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
	cpu = ffs(tdq_idle);
	if (cpu)
		return (--cpu);
	/*
	 * If there are no idle cores see if we can run the thread locally.
	 * This may improve locality among sleepers and wakers when there
	 * is shared data.
	 */
	if (tryself && pri < curthread->td_priority) {
		CTR1(KTR_ULE, "tryself %d",
		    curthread->td_priority);
		return (self);
	}
	/*
	 * Now search for the cpu running the lowest priority thread with
	 * the least load.
	 */
10897b8bfa0dSJeff Roberson */ 1090ae7a6b38SJeff Roberson if (pick_pri) 1091ae7a6b38SJeff Roberson cpu = tdq_lowestpri(); 1092ae7a6b38SJeff Roberson else 1093ae7a6b38SJeff Roberson cpu = tdq_lowestload(); 1094ae7a6b38SJeff Roberson return (cpu); 109580f86c9fSJeff Roberson } 109680f86c9fSJeff Roberson 109722bf7d9aSJeff Roberson #endif /* SMP */ 109822bf7d9aSJeff Roberson 109922bf7d9aSJeff Roberson /* 110022bf7d9aSJeff Roberson * Pick the highest priority task we have and return it. 11010c0a98b2SJeff Roberson */ 1102ad1e7d28SJulian Elischer static struct td_sched * 1103ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq) 11045d7ef00cSJeff Roberson { 1105ad1e7d28SJulian Elischer struct td_sched *ts; 11065d7ef00cSJeff Roberson 1107ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1108e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_realtime); 1109dda713dfSJeff Roberson if (ts != NULL) 1110e7d50326SJeff Roberson return (ts); 11113f872f85SJeff Roberson ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 1112e7d50326SJeff Roberson if (ts != NULL) { 1113dda713dfSJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE, 1114e7d50326SJeff Roberson ("tdq_choose: Invalid priority on timeshare queue %d", 1115e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1116ad1e7d28SJulian Elischer return (ts); 111715dc847eSJeff Roberson } 111815dc847eSJeff Roberson 1119e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_idle); 1120e7d50326SJeff Roberson if (ts != NULL) { 1121e7d50326SJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE, 1122e7d50326SJeff Roberson ("tdq_choose: Invalid priority on idle queue %d", 1123e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1124e7d50326SJeff Roberson return (ts); 1125e7d50326SJeff Roberson } 1126e7d50326SJeff Roberson 1127e7d50326SJeff Roberson return (NULL); 1128245f3abfSJeff Roberson } 11290a016a05SJeff Roberson 1130ae7a6b38SJeff Roberson /* 1131ae7a6b38SJeff Roberson * Initialize a thread queue. 
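 *
 * The three runqs initialized here are consumed by tdq_choose() above in
 * strict order: tdq_realtime is always drained first, tdq_timeshare is
 * scanned circularly starting at tdq_ridx, and tdq_idle runs only when
 * the other two are empty.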
1132ae7a6b38SJeff Roberson */ 11330a016a05SJeff Roberson static void 1134ad1e7d28SJulian Elischer tdq_setup(struct tdq *tdq) 11350a016a05SJeff Roberson { 1136ae7a6b38SJeff Roberson 1137c47f202bSJeff Roberson if (bootverbose) 1138c47f202bSJeff Roberson printf("ULE: setup cpu %d\n", TDQ_ID(tdq)); 1139e7d50326SJeff Roberson runq_init(&tdq->tdq_realtime); 1140e7d50326SJeff Roberson runq_init(&tdq->tdq_timeshare); 1141d2ad694cSJeff Roberson runq_init(&tdq->tdq_idle); 1142d2ad694cSJeff Roberson tdq->tdq_load = 0; 11430a016a05SJeff Roberson } 11440a016a05SJeff Roberson 1145c47f202bSJeff Roberson #ifdef SMP 1146c47f202bSJeff Roberson static void 1147c47f202bSJeff Roberson tdg_setup(struct tdq_group *tdg) 1148c47f202bSJeff Roberson { 1149c47f202bSJeff Roberson if (bootverbose) 1150c47f202bSJeff Roberson printf("ULE: setup cpu group %d\n", TDG_ID(tdg)); 1151c47f202bSJeff Roberson snprintf(tdg->tdg_name, sizeof(tdg->tdg_name), 1152c47f202bSJeff Roberson "sched lock %d", (int)TDG_ID(tdg)); 1153c47f202bSJeff Roberson mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock", 1154c47f202bSJeff Roberson MTX_SPIN | MTX_RECURSE); 1155c47f202bSJeff Roberson LIST_INIT(&tdg->tdg_members); 1156c47f202bSJeff Roberson tdg->tdg_load = 0; 1157c47f202bSJeff Roberson tdg->tdg_transferable = 0; 1158c47f202bSJeff Roberson tdg->tdg_cpus = 0; 1159c47f202bSJeff Roberson tdg->tdg_mask = 0; 1160c47f202bSJeff Roberson tdg->tdg_cpumask = 0; 1161c47f202bSJeff Roberson tdg->tdg_idlemask = 0; 1162c47f202bSJeff Roberson } 1163c47f202bSJeff Roberson 1164c47f202bSJeff Roberson static void 1165c47f202bSJeff Roberson tdg_add(struct tdq_group *tdg, struct tdq *tdq) 1166c47f202bSJeff Roberson { 1167c47f202bSJeff Roberson if (tdg->tdg_mask == 0) 1168c47f202bSJeff Roberson tdg->tdg_mask |= 1 << TDQ_ID(tdq); 1169c47f202bSJeff Roberson tdg->tdg_cpumask |= 1 << TDQ_ID(tdq); 1170c47f202bSJeff Roberson tdg->tdg_cpus++; 1171c47f202bSJeff Roberson tdq->tdq_group = tdg; 1172c47f202bSJeff Roberson tdq->tdq_lock = &tdg->tdg_lock; 1173c47f202bSJeff Roberson LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings); 1174c47f202bSJeff Roberson if (bootverbose) 1175c47f202bSJeff Roberson printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n", 1176c47f202bSJeff Roberson TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask); 1177c47f202bSJeff Roberson } 1178c47f202bSJeff Roberson 1179c47f202bSJeff Roberson static void 1180c47f202bSJeff Roberson sched_setup_topology(void) 1181c47f202bSJeff Roberson { 1182c47f202bSJeff Roberson struct tdq_group *tdg; 1183c47f202bSJeff Roberson struct cpu_group *cg; 1184c47f202bSJeff Roberson int balance_groups; 1185c47f202bSJeff Roberson struct tdq *tdq; 1186c47f202bSJeff Roberson int i; 1187c47f202bSJeff Roberson int j; 1188c47f202bSJeff Roberson 1189c47f202bSJeff Roberson topology = 1; 1190c47f202bSJeff Roberson balance_groups = 0; 1191c47f202bSJeff Roberson for (i = 0; i < smp_topology->ct_count; i++) { 1192c47f202bSJeff Roberson cg = &smp_topology->ct_group[i]; 1193c47f202bSJeff Roberson tdg = &tdq_groups[i]; 1194c47f202bSJeff Roberson /* 1195c47f202bSJeff Roberson * Initialize the group. 1196c47f202bSJeff Roberson */ 1197c47f202bSJeff Roberson tdg_setup(tdg); 1198c47f202bSJeff Roberson /* 1199c47f202bSJeff Roberson * Find all of the group members and add them. 
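 *
 * cg_mask is a bitmask of cpu ids: bit j set means cpu j belongs to this
 * group, so its tdq is initialized and linked in here.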
1200c47f202bSJeff Roberson */ 1201c47f202bSJeff Roberson for (j = 0; j < MAXCPU; j++) { 1202c47f202bSJeff Roberson if ((cg->cg_mask & (1 << j)) != 0) { 1203c47f202bSJeff Roberson tdq = TDQ_CPU(j); 1204c47f202bSJeff Roberson tdq_setup(tdq); 1205c47f202bSJeff Roberson tdg_add(tdg, tdq); 1206c47f202bSJeff Roberson } 1207c47f202bSJeff Roberson } 1208c47f202bSJeff Roberson if (tdg->tdg_cpus > 1) 1209c47f202bSJeff Roberson balance_groups = 1; 1210c47f202bSJeff Roberson } 1211c47f202bSJeff Roberson tdg_maxid = smp_topology->ct_count - 1; 1212c47f202bSJeff Roberson if (balance_groups) 1213c47f202bSJeff Roberson sched_balance_groups(NULL); 1214c47f202bSJeff Roberson } 1215c47f202bSJeff Roberson 1216c47f202bSJeff Roberson static void 1217c47f202bSJeff Roberson sched_setup_smp(void) 1218c47f202bSJeff Roberson { 1219c47f202bSJeff Roberson struct tdq_group *tdg; 1220c47f202bSJeff Roberson struct tdq *tdq; 1221c47f202bSJeff Roberson int cpus; 1222c47f202bSJeff Roberson int i; 1223c47f202bSJeff Roberson 1224c47f202bSJeff Roberson for (cpus = 0, i = 0; i < MAXCPU; i++) { 1225c47f202bSJeff Roberson if (CPU_ABSENT(i)) 1226c47f202bSJeff Roberson continue; 1227c47f202bSJeff Roberson tdq = &tdq_cpu[i]; 1228c47f202bSJeff Roberson tdg = &tdq_groups[i]; 1229c47f202bSJeff Roberson /* 1230c47f202bSJeff Roberson * Set up a tdq group with one member. 1231c47f202bSJeff Roberson */ 1232c47f202bSJeff Roberson tdg_setup(tdg); 1233c47f202bSJeff Roberson tdq_setup(tdq); 1234c47f202bSJeff Roberson tdg_add(tdg, tdq); 1235c47f202bSJeff Roberson cpus++; 1236c47f202bSJeff Roberson } 1237c47f202bSJeff Roberson tdg_maxid = cpus - 1; 1238c47f202bSJeff Roberson } 1239c47f202bSJeff Roberson 1240c47f202bSJeff Roberson /* 1241c47f202bSJeff Roberson * Fake a topology with one group containing all CPUs. 1242c47f202bSJeff Roberson */ 1243c47f202bSJeff Roberson static void 1244c47f202bSJeff Roberson sched_fake_topo(void) 1245c47f202bSJeff Roberson { 1246c47f202bSJeff Roberson #ifdef SCHED_FAKE_TOPOLOGY 1247c47f202bSJeff Roberson static struct cpu_top top; 1248c47f202bSJeff Roberson static struct cpu_group group; 1249c47f202bSJeff Roberson 1250c47f202bSJeff Roberson top.ct_count = 1; 1251c47f202bSJeff Roberson top.ct_group = &group; 1252c47f202bSJeff Roberson group.cg_mask = all_cpus; 1253c47f202bSJeff Roberson group.cg_count = mp_ncpus; 1254c47f202bSJeff Roberson group.cg_children = 0; 1255c47f202bSJeff Roberson smp_topology = &top; 1256c47f202bSJeff Roberson #endif 1257c47f202bSJeff Roberson } 1258c47f202bSJeff Roberson #endif 1259c47f202bSJeff Roberson 1260ae7a6b38SJeff Roberson /* 1261ae7a6b38SJeff Roberson * Set up the thread queues and initialize the topology based on MD 1262ae7a6b38SJeff Roberson * information. 1263ae7a6b38SJeff Roberson */ 126435e6168fSJeff Roberson static void 126535e6168fSJeff Roberson sched_setup(void *dummy) 126635e6168fSJeff Roberson { 1267ae7a6b38SJeff Roberson struct tdq *tdq; 1268c47f202bSJeff Roberson 1269c47f202bSJeff Roberson tdq = TDQ_SELF(); 12700ec896fdSJeff Roberson #ifdef SMP 1271cac77d04SJeff Roberson /* 1272ae7a6b38SJeff Roberson * Initialize long-term cpu balancing algorithm. 1273cac77d04SJeff Roberson */ 1274ae7a6b38SJeff Roberson callout_init(&balco, CALLOUT_MPSAFE); 1275ae7a6b38SJeff Roberson callout_init(&gbalco, CALLOUT_MPSAFE); 1276c47f202bSJeff Roberson sched_fake_topo(); 1277c47f202bSJeff Roberson /* 1278c47f202bSJeff Roberson * Set up tdqs based on a topology configuration or vanilla SMP based 1279c47f202bSJeff Roberson * on mp_maxid.
1280c47f202bSJeff Roberson */ 1281c47f202bSJeff Roberson if (smp_topology == NULL) 1282c47f202bSJeff Roberson sched_setup_smp(); 1283c47f202bSJeff Roberson else 1284c47f202bSJeff Roberson sched_setup_topology(); 1285ae7a6b38SJeff Roberson sched_balance(NULL); 1286749d01b0SJeff Roberson #else 1287c47f202bSJeff Roberson tdq_setup(tdq); 1288c47f202bSJeff Roberson mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE); 1289c47f202bSJeff Roberson tdq->tdq_lock = &tdq_lock; 1290356500a3SJeff Roberson #endif 1291ae7a6b38SJeff Roberson /* 1292ae7a6b38SJeff Roberson * To avoid divide-by-zero, we set realstathz to a dummy value 1293ae7a6b38SJeff Roberson * in case sched_clock() is called before sched_initticks(). 1294ae7a6b38SJeff Roberson */ 1295ae7a6b38SJeff Roberson realstathz = hz; 1296ae7a6b38SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */ 1297ae7a6b38SJeff Roberson tickincr = 1 << SCHED_TICK_SHIFT; 1298ae7a6b38SJeff Roberson 1299ae7a6b38SJeff Roberson /* Add thread0's load since it's running. */ 1300ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1301c47f202bSJeff Roberson thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF()); 1302ae7a6b38SJeff Roberson tdq_load_add(tdq, &td_sched0); 1303ae7a6b38SJeff Roberson TDQ_UNLOCK(tdq); 130435e6168fSJeff Roberson } 130535e6168fSJeff Roberson 1306ae7a6b38SJeff Roberson /* 1307ae7a6b38SJeff Roberson * This routine determines the tickincr after stathz and hz are set up. 1308ae7a6b38SJeff Roberson */ 1309a1d4fe69SDavid Xu /* ARGSUSED */ 1310a1d4fe69SDavid Xu static void 1311a1d4fe69SDavid Xu sched_initticks(void *dummy) 1312a1d4fe69SDavid Xu { 1313ae7a6b38SJeff Roberson int incr; 1314ae7a6b38SJeff Roberson 1315a1d4fe69SDavid Xu realstathz = stathz ? stathz : hz; 131614618990SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */ 1317a1d4fe69SDavid Xu 1318a1d4fe69SDavid Xu /* 1319e7d50326SJeff Roberson * tickincr is shifted out by 10 to avoid rounding errors due to 13203f872f85SJeff Roberson * hz not being evenly divisible by stathz on all platforms. 1321e7d50326SJeff Roberson */ 1322ae7a6b38SJeff Roberson incr = (hz << SCHED_TICK_SHIFT) / realstathz; 1323e7d50326SJeff Roberson /* 1324e7d50326SJeff Roberson * This does not work for values of stathz that are more than 1325e7d50326SJeff Roberson * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen. 1326a1d4fe69SDavid Xu */ 1327ae7a6b38SJeff Roberson if (incr == 0) 1328ae7a6b38SJeff Roberson incr = 1; 1329ae7a6b38SJeff Roberson tickincr = incr; 13307b8bfa0dSJeff Roberson #ifdef SMP 13319862717aSJeff Roberson /* 13329862717aSJeff Roberson * Set steal thresh to log2(mp_ncpu) but no greater than 4. This 13339862717aSJeff Roberson * prevents excess thrashing on large machines and excess idle on 13349862717aSJeff Roberson * smaller machines. 13359862717aSJeff Roberson */ 13369862717aSJeff Roberson steal_thresh = min(ffs(mp_ncpus) - 1, 4); 13377b8bfa0dSJeff Roberson affinity = SCHED_AFFINITY_DEFAULT; 13387b8bfa0dSJeff Roberson #endif 1339a1d4fe69SDavid Xu } 1340a1d4fe69SDavid Xu 1341a1d4fe69SDavid Xu 134235e6168fSJeff Roberson /* 1343ae7a6b38SJeff Roberson * This is the core of the interactivity algorithm. Determines a score based 1344ae7a6b38SJeff Roberson * on past behavior. It is the ratio of sleep time to run time scaled to 1345ae7a6b38SJeff Roberson * a [0, 100] integer. This is the voluntary sleep time of a process, which 1346ae7a6b38SJeff Roberson * differs from the cpu usage because it does not account for time spent 1347ae7a6b38SJeff Roberson * waiting on a run-queue.
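 *
 * A worked example at the stock scale (the range above is [0, 100], so
 * SCHED_INTERACT_HALF is 50): a thread that sleeps two ticks for every
 * tick it runs scores 50 / 2 = 25, while the inverse mix scores
 * 50 + (50 - 25) = 75.  Lower scores are more interactive, and the
 * arithmetic below is all integer division.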
Would be prettier if we had floating point. 1348ae7a6b38SJeff Roberson */ 1349ae7a6b38SJeff Roberson static int 1350ae7a6b38SJeff Roberson sched_interact_score(struct thread *td) 1351ae7a6b38SJeff Roberson { 1352ae7a6b38SJeff Roberson struct td_sched *ts; 1353ae7a6b38SJeff Roberson int div; 1354ae7a6b38SJeff Roberson 1355ae7a6b38SJeff Roberson ts = td->td_sched; 1356ae7a6b38SJeff Roberson /* 1357ae7a6b38SJeff Roberson * The score is only needed if this is likely to be an interactive 1358ae7a6b38SJeff Roberson * task. Don't go through the expense of computing it if there's 1359ae7a6b38SJeff Roberson * no chance. 1360ae7a6b38SJeff Roberson */ 1361ae7a6b38SJeff Roberson if (sched_interact <= SCHED_INTERACT_HALF && 1362ae7a6b38SJeff Roberson ts->ts_runtime >= ts->ts_slptime) 1363ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1364ae7a6b38SJeff Roberson 1365ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1366ae7a6b38SJeff Roberson div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF); 1367ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF + 1368ae7a6b38SJeff Roberson (SCHED_INTERACT_HALF - (ts->ts_slptime / div))); 1369ae7a6b38SJeff Roberson } 1370ae7a6b38SJeff Roberson if (ts->ts_slptime > ts->ts_runtime) { 1371ae7a6b38SJeff Roberson div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF); 1372ae7a6b38SJeff Roberson return (ts->ts_runtime / div); 1373ae7a6b38SJeff Roberson } 1374ae7a6b38SJeff Roberson /* runtime == slptime */ 1375ae7a6b38SJeff Roberson if (ts->ts_runtime) 1376ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1377ae7a6b38SJeff Roberson 1378ae7a6b38SJeff Roberson /* 1379ae7a6b38SJeff Roberson * This can happen if slptime and runtime are 0. 1380ae7a6b38SJeff Roberson */ 1381ae7a6b38SJeff Roberson return (0); 1382ae7a6b38SJeff Roberson 1383ae7a6b38SJeff Roberson } 1384ae7a6b38SJeff Roberson 1385ae7a6b38SJeff Roberson /* 138635e6168fSJeff Roberson * Scale the scheduling priority according to the "interactivity" of this 138735e6168fSJeff Roberson * process. 138835e6168fSJeff Roberson */ 138915dc847eSJeff Roberson static void 13908460a577SJohn Birrell sched_priority(struct thread *td) 139135e6168fSJeff Roberson { 1392e7d50326SJeff Roberson int score; 139335e6168fSJeff Roberson int pri; 139435e6168fSJeff Roberson 13958460a577SJohn Birrell if (td->td_pri_class != PRI_TIMESHARE) 139615dc847eSJeff Roberson return; 1397e7d50326SJeff Roberson /* 1398e7d50326SJeff Roberson * If the score is interactive we place the thread in the realtime 1399e7d50326SJeff Roberson * queue with a priority that is less than kernel and interrupt 1400e7d50326SJeff Roberson * priorities. These threads are not subject to nice restrictions. 1401e7d50326SJeff Roberson * 1402ae7a6b38SJeff Roberson * Scores greater than this are placed on the normal timeshare queue 1403e7d50326SJeff Roberson * where the priority is partially decided by the most recent cpu 1404e7d50326SJeff Roberson * utilization and the rest is decided by nice value. 
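 *
 * As an example of the mapping below, an interactive score s becomes
 * PRI_MIN_REALTIME + ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) /
 * sched_interact) * s, so a score halfway to the threshold lands
 * roughly halfway through the realtime range (quantized by the
 * integer division).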
1405e7d50326SJeff Roberson */ 1406e7d50326SJeff Roberson score = sched_interact_score(td); 1407e7d50326SJeff Roberson if (score < sched_interact) { 1408e7d50326SJeff Roberson pri = PRI_MIN_REALTIME; 1409e7d50326SJeff Roberson pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact) 1410e7d50326SJeff Roberson * score; 1411e7d50326SJeff Roberson KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME, 14129a93305aSJeff Roberson ("sched_priority: invalid interactive priority %d score %d", 14139a93305aSJeff Roberson pri, score)); 1414e7d50326SJeff Roberson } else { 1415e7d50326SJeff Roberson pri = SCHED_PRI_MIN; 1416e7d50326SJeff Roberson if (td->td_sched->ts_ticks) 1417e7d50326SJeff Roberson pri += SCHED_PRI_TICKS(td->td_sched); 1418e7d50326SJeff Roberson pri += SCHED_PRI_NICE(td->td_proc->p_nice); 1419ae7a6b38SJeff Roberson KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE, 1420ae7a6b38SJeff Roberson ("sched_priority: invalid priority %d: nice %d, " 1421ae7a6b38SJeff Roberson "ticks %d ftick %d ltick %d tick pri %d", 1422ae7a6b38SJeff Roberson pri, td->td_proc->p_nice, td->td_sched->ts_ticks, 1423ae7a6b38SJeff Roberson td->td_sched->ts_ftick, td->td_sched->ts_ltick, 1424ae7a6b38SJeff Roberson SCHED_PRI_TICKS(td->td_sched))); 1425e7d50326SJeff Roberson } 14268460a577SJohn Birrell sched_user_prio(td, pri); 142735e6168fSJeff Roberson 142815dc847eSJeff Roberson return; 142935e6168fSJeff Roberson } 143035e6168fSJeff Roberson 143135e6168fSJeff Roberson /* 1432d322132cSJeff Roberson * This routine enforces a maximum limit on the amount of scheduling history 1433ae7a6b38SJeff Roberson * kept. It is called after either the slptime or runtime is adjusted. This 1434ae7a6b38SJeff Roberson * function is ugly due to integer math. 1435d322132cSJeff Roberson */ 14364b60e324SJeff Roberson static void 14378460a577SJohn Birrell sched_interact_update(struct thread *td) 14384b60e324SJeff Roberson { 1439155b6ca1SJeff Roberson struct td_sched *ts; 14409a93305aSJeff Roberson u_int sum; 14413f741ca1SJeff Roberson 1442155b6ca1SJeff Roberson ts = td->td_sched; 1443ae7a6b38SJeff Roberson sum = ts->ts_runtime + ts->ts_slptime; 1444d322132cSJeff Roberson if (sum < SCHED_SLP_RUN_MAX) 1445d322132cSJeff Roberson return; 1446d322132cSJeff Roberson /* 1447155b6ca1SJeff Roberson * This only happens from two places: 1448155b6ca1SJeff Roberson * 1) We have added an unusual amount of run time from fork_exit. 1449155b6ca1SJeff Roberson * 2) We have added an unusual amount of sleep time from sched_sleep(). 1450155b6ca1SJeff Roberson */ 1451155b6ca1SJeff Roberson if (sum > SCHED_SLP_RUN_MAX * 2) { 1452ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1453ae7a6b38SJeff Roberson ts->ts_runtime = SCHED_SLP_RUN_MAX; 1454ae7a6b38SJeff Roberson ts->ts_slptime = 1; 1455155b6ca1SJeff Roberson } else { 1456ae7a6b38SJeff Roberson ts->ts_slptime = SCHED_SLP_RUN_MAX; 1457ae7a6b38SJeff Roberson ts->ts_runtime = 1; 1458155b6ca1SJeff Roberson } 1459155b6ca1SJeff Roberson return; 1460155b6ca1SJeff Roberson } 1461155b6ca1SJeff Roberson /* 1462d322132cSJeff Roberson * If we have exceeded by more than 1/5th then the algorithm below 1463d322132cSJeff Roberson * will not bring us back into range. 
Dividing by two here forces 14642454aaf5SJeff Roberson * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1465d322132cSJeff Roberson */ 146637a35e4aSJeff Roberson if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { 1467ae7a6b38SJeff Roberson ts->ts_runtime /= 2; 1468ae7a6b38SJeff Roberson ts->ts_slptime /= 2; 1469d322132cSJeff Roberson return; 1470d322132cSJeff Roberson } 1471ae7a6b38SJeff Roberson ts->ts_runtime = (ts->ts_runtime / 5) * 4; 1472ae7a6b38SJeff Roberson ts->ts_slptime = (ts->ts_slptime / 5) * 4; 1473d322132cSJeff Roberson } 1474d322132cSJeff Roberson 1475ae7a6b38SJeff Roberson /* 1476ae7a6b38SJeff Roberson * Scale back the interactivity history when a child thread is created. The 1477ae7a6b38SJeff Roberson * history is inherited from the parent but the thread may behave totally 1478ae7a6b38SJeff Roberson * differently. For example, a shell spawning a compiler process. We want 1479ae7a6b38SJeff Roberson * to learn that the compiler is behaving badly very quickly. 1480ae7a6b38SJeff Roberson */ 1481d322132cSJeff Roberson static void 14828460a577SJohn Birrell sched_interact_fork(struct thread *td) 1483d322132cSJeff Roberson { 1484d322132cSJeff Roberson int ratio; 1485d322132cSJeff Roberson int sum; 1486d322132cSJeff Roberson 1487ae7a6b38SJeff Roberson sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime; 1488d322132cSJeff Roberson if (sum > SCHED_SLP_RUN_FORK) { 1489d322132cSJeff Roberson ratio = sum / SCHED_SLP_RUN_FORK; 1490ae7a6b38SJeff Roberson td->td_sched->ts_runtime /= ratio; 1491ae7a6b38SJeff Roberson td->td_sched->ts_slptime /= ratio; 14924b60e324SJeff Roberson } 14934b60e324SJeff Roberson } 14944b60e324SJeff Roberson 149515dc847eSJeff Roberson /* 1496ae7a6b38SJeff Roberson * Called from proc0_init() to setup the scheduler fields. 1497ed062c8dSJulian Elischer */ 1498ed062c8dSJulian Elischer void 1499ed062c8dSJulian Elischer schedinit(void) 1500ed062c8dSJulian Elischer { 1501e7d50326SJeff Roberson 1502ed062c8dSJulian Elischer /* 1503ed062c8dSJulian Elischer * Set up the scheduler specific parts of proc0. 1504ed062c8dSJulian Elischer */ 1505ed062c8dSJulian Elischer proc0.p_sched = NULL; /* XXX */ 1506ad1e7d28SJulian Elischer thread0.td_sched = &td_sched0; 1507e7d50326SJeff Roberson td_sched0.ts_ltick = ticks; 15088ab80cf0SJeff Roberson td_sched0.ts_ftick = ticks; 1509ad1e7d28SJulian Elischer td_sched0.ts_thread = &thread0; 1510ed062c8dSJulian Elischer } 1511ed062c8dSJulian Elischer 1512ed062c8dSJulian Elischer /* 151315dc847eSJeff Roberson * This is only somewhat accurate since given many processes of the same 151415dc847eSJeff Roberson * priority they will switch when their slices run out, which will be 1515e7d50326SJeff Roberson * at most sched_slice stathz ticks. 151615dc847eSJeff Roberson */ 151735e6168fSJeff Roberson int 151835e6168fSJeff Roberson sched_rr_interval(void) 151935e6168fSJeff Roberson { 1520e7d50326SJeff Roberson 1521e7d50326SJeff Roberson /* Convert sched_slice to hz */ 1522e7d50326SJeff Roberson return (hz/(realstathz/sched_slice)); 152335e6168fSJeff Roberson } 152435e6168fSJeff Roberson 1525ae7a6b38SJeff Roberson /* 1526ae7a6b38SJeff Roberson * Update the percent cpu tracking information when it is requested or 1527ae7a6b38SJeff Roberson * the total history exceeds the maximum. We keep a sliding history of 1528ae7a6b38SJeff Roberson * tick counts that slowly decays. This is less precise than the 4BSD 1529ae7a6b38SJeff Roberson * mechanism since it happens with less regular and frequent events. 
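 *
 * The effect of the code below is a renormalization: once the sample
 * window outgrows SCHED_TICK_TARG ticks, ts_ticks is rescaled to the
 * observed tick rate times SCHED_TICK_TARG and ts_ftick is pulled
 * forward, so the window length stays bounded and old history decays.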
1530ae7a6b38SJeff Roberson */ 153122bf7d9aSJeff Roberson static void 1532ad1e7d28SJulian Elischer sched_pctcpu_update(struct td_sched *ts) 153335e6168fSJeff Roberson { 1534e7d50326SJeff Roberson 1535e7d50326SJeff Roberson if (ts->ts_ticks == 0) 1536e7d50326SJeff Roberson return; 15378ab80cf0SJeff Roberson if (ticks - (hz / 10) < ts->ts_ltick && 15388ab80cf0SJeff Roberson SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 15398ab80cf0SJeff Roberson return; 154035e6168fSJeff Roberson /* 154135e6168fSJeff Roberson * Adjust counters and watermark for pctcpu calc. 1542210491d3SJeff Roberson */ 1543e7d50326SJeff Roberson if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1544ad1e7d28SJulian Elischer ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1545e7d50326SJeff Roberson SCHED_TICK_TARG; 1546e7d50326SJeff Roberson else 1547ad1e7d28SJulian Elischer ts->ts_ticks = 0; 1548ad1e7d28SJulian Elischer ts->ts_ltick = ticks; 1549e7d50326SJeff Roberson ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 155035e6168fSJeff Roberson } 155135e6168fSJeff Roberson 1552ae7a6b38SJeff Roberson /* 1553ae7a6b38SJeff Roberson * Adjust the priority of a thread. Move it to the appropriate run-queue 1554ae7a6b38SJeff Roberson * if necessary. This is the back-end for several priority related 1555ae7a6b38SJeff Roberson * functions. 1556ae7a6b38SJeff Roberson */ 1557e7d50326SJeff Roberson static void 1558f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio) 155935e6168fSJeff Roberson { 1560ad1e7d28SJulian Elischer struct td_sched *ts; 156135e6168fSJeff Roberson 156281d47d3fSJeff Roberson CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 156381d47d3fSJeff Roberson td, td->td_proc->p_comm, td->td_priority, prio, curthread, 156481d47d3fSJeff Roberson curthread->td_proc->p_comm); 1565ad1e7d28SJulian Elischer ts = td->td_sched; 15667b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1567f5c157d9SJohn Baldwin if (td->td_priority == prio) 1568f5c157d9SJohn Baldwin return; 1569e7d50326SJeff Roberson 15703f872f85SJeff Roberson if (TD_ON_RUNQ(td) && prio < td->td_priority) { 15713f741ca1SJeff Roberson /* 15723f741ca1SJeff Roberson * If the priority has been elevated due to priority 15733f741ca1SJeff Roberson * propagation, we may have to move ourselves to a new 1574e7d50326SJeff Roberson * queue. This could be optimized to not re-add in some 1575e7d50326SJeff Roberson * cases. 1576f2b74cbfSJeff Roberson */ 1577e7d50326SJeff Roberson sched_rem(td); 1578e7d50326SJeff Roberson td->td_priority = prio; 1579ae7a6b38SJeff Roberson sched_add(td, SRQ_BORROWING); 1580ae7a6b38SJeff Roberson } else { 1581ae7a6b38SJeff Roberson #ifdef SMP 1582ae7a6b38SJeff Roberson struct tdq *tdq; 1583ae7a6b38SJeff Roberson 1584ae7a6b38SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 1585ae7a6b38SJeff Roberson if (prio < tdq->tdq_lowpri) 1586ae7a6b38SJeff Roberson tdq->tdq_lowpri = prio; 1587ae7a6b38SJeff Roberson #endif 15883f741ca1SJeff Roberson td->td_priority = prio; 158935e6168fSJeff Roberson } 1590ae7a6b38SJeff Roberson } 159135e6168fSJeff Roberson 1592f5c157d9SJohn Baldwin /* 1593f5c157d9SJohn Baldwin * Update a thread's priority when it is lent another thread's 1594f5c157d9SJohn Baldwin * priority. 
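 * An illustrative usage sketch follows; sched_lend_prio() itself comes
 * right after it.
 */

/*
 * Editor's sketch, not part of the original file: roughly how a
 * turnstile might drive the lend/unlend pair during priority
 * propagation.  example_propagate() and its arguments are hypothetical;
 * sched_lend_prio(), sched_unlend_prio(), and the requirement that the
 * thread lock be held are taken from this file.
 */
static void
example_propagate(struct thread *owner, u_char blocked_pri, u_char base_pri)
{

	/* Lend: boost the lock owner to the highest blocked priority. */
	thread_lock(owner);
	sched_lend_prio(owner, blocked_pri);
	thread_unlock(owner);

	/* Unlend: fall back to the best remaining lender or the base. */
	thread_lock(owner);
	sched_unlend_prio(owner, base_pri);
	thread_unlock(owner);
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.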
1595f5c157d9SJohn Baldwin */ 1596f5c157d9SJohn Baldwin void 1597f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio) 1598f5c157d9SJohn Baldwin { 1599f5c157d9SJohn Baldwin 1600f5c157d9SJohn Baldwin td->td_flags |= TDF_BORROWING; 1601f5c157d9SJohn Baldwin sched_thread_priority(td, prio); 1602f5c157d9SJohn Baldwin } 1603f5c157d9SJohn Baldwin 1604f5c157d9SJohn Baldwin /* 1605f5c157d9SJohn Baldwin * Restore a thread's priority when priority propagation is 1606f5c157d9SJohn Baldwin * over. The prio argument is the minimum priority the thread 1607f5c157d9SJohn Baldwin * needs to have to satisfy other possible priority lending 1608f5c157d9SJohn Baldwin * requests. If the thread's regular priority is less 1609f5c157d9SJohn Baldwin * important than prio, the thread will keep a priority boost 1610f5c157d9SJohn Baldwin * of prio. 1611f5c157d9SJohn Baldwin */ 1612f5c157d9SJohn Baldwin void 1613f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio) 1614f5c157d9SJohn Baldwin { 1615f5c157d9SJohn Baldwin u_char base_pri; 1616f5c157d9SJohn Baldwin 1617f5c157d9SJohn Baldwin if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1618f5c157d9SJohn Baldwin td->td_base_pri <= PRI_MAX_TIMESHARE) 16198460a577SJohn Birrell base_pri = td->td_user_pri; 1620f5c157d9SJohn Baldwin else 1621f5c157d9SJohn Baldwin base_pri = td->td_base_pri; 1622f5c157d9SJohn Baldwin if (prio >= base_pri) { 1623f5c157d9SJohn Baldwin td->td_flags &= ~TDF_BORROWING; 1624f5c157d9SJohn Baldwin sched_thread_priority(td, base_pri); 1625f5c157d9SJohn Baldwin } else 1626f5c157d9SJohn Baldwin sched_lend_prio(td, prio); 1627f5c157d9SJohn Baldwin } 1628f5c157d9SJohn Baldwin 1629ae7a6b38SJeff Roberson /* 1630ae7a6b38SJeff Roberson * Standard entry for setting the priority to an absolute value. 1631ae7a6b38SJeff Roberson */ 1632f5c157d9SJohn Baldwin void 1633f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio) 1634f5c157d9SJohn Baldwin { 1635f5c157d9SJohn Baldwin u_char oldprio; 1636f5c157d9SJohn Baldwin 1637f5c157d9SJohn Baldwin /* First, update the base priority. */ 1638f5c157d9SJohn Baldwin td->td_base_pri = prio; 1639f5c157d9SJohn Baldwin 1640f5c157d9SJohn Baldwin /* 164150aaa791SJohn Baldwin * If the thread is borrowing another thread's priority, don't 1642f5c157d9SJohn Baldwin * ever lower the priority. 1643f5c157d9SJohn Baldwin */ 1644f5c157d9SJohn Baldwin if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1645f5c157d9SJohn Baldwin return; 1646f5c157d9SJohn Baldwin 1647f5c157d9SJohn Baldwin /* Change the real priority. */ 1648f5c157d9SJohn Baldwin oldprio = td->td_priority; 1649f5c157d9SJohn Baldwin sched_thread_priority(td, prio); 1650f5c157d9SJohn Baldwin 1651f5c157d9SJohn Baldwin /* 1652f5c157d9SJohn Baldwin * If the thread is on a turnstile, then let the turnstile update 1653f5c157d9SJohn Baldwin * its state. 1654f5c157d9SJohn Baldwin */ 1655f5c157d9SJohn Baldwin if (TD_ON_LOCK(td) && oldprio != prio) 1656f5c157d9SJohn Baldwin turnstile_adjust(td, oldprio); 1657f5c157d9SJohn Baldwin } 1658f5c157d9SJohn Baldwin 1659ae7a6b38SJeff Roberson /* 1660ae7a6b38SJeff Roberson * Set the base user priority; this does not affect the current running priority.
1661ae7a6b38SJeff Roberson */ 166235e6168fSJeff Roberson void 16638460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio) 16643db720fdSDavid Xu { 16653db720fdSDavid Xu u_char oldprio; 16663db720fdSDavid Xu 16678460a577SJohn Birrell td->td_base_user_pri = prio; 1668fc6c30f6SJulian Elischer if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1669fc6c30f6SJulian Elischer return; 16708460a577SJohn Birrell oldprio = td->td_user_pri; 16718460a577SJohn Birrell td->td_user_pri = prio; 16723db720fdSDavid Xu 16733db720fdSDavid Xu if (TD_ON_UPILOCK(td) && oldprio != prio) 16743db720fdSDavid Xu umtx_pi_adjust(td, oldprio); 16753db720fdSDavid Xu } 16763db720fdSDavid Xu 16773db720fdSDavid Xu void 16783db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio) 16793db720fdSDavid Xu { 16803db720fdSDavid Xu u_char oldprio; 16813db720fdSDavid Xu 16823db720fdSDavid Xu td->td_flags |= TDF_UBORROWING; 16833db720fdSDavid Xu 1684f645b5daSMaxim Konovalov oldprio = td->td_user_pri; 16858460a577SJohn Birrell td->td_user_pri = prio; 16863db720fdSDavid Xu 16873db720fdSDavid Xu if (TD_ON_UPILOCK(td) && oldprio != prio) 16883db720fdSDavid Xu umtx_pi_adjust(td, oldprio); 16893db720fdSDavid Xu } 16903db720fdSDavid Xu 16913db720fdSDavid Xu void 16923db720fdSDavid Xu sched_unlend_user_prio(struct thread *td, u_char prio) 16933db720fdSDavid Xu { 16943db720fdSDavid Xu u_char base_pri; 16953db720fdSDavid Xu 16968460a577SJohn Birrell base_pri = td->td_base_user_pri; 16973db720fdSDavid Xu if (prio >= base_pri) { 16983db720fdSDavid Xu td->td_flags &= ~TDF_UBORROWING; 16998460a577SJohn Birrell sched_user_prio(td, base_pri); 17003db720fdSDavid Xu } else 17013db720fdSDavid Xu sched_lend_user_prio(td, prio); 17023db720fdSDavid Xu } 17033db720fdSDavid Xu 1704ae7a6b38SJeff Roberson /* 170508c9a16cSJeff Roberson * Add the thread passed as 'newtd' to the run queue before selecting 170608c9a16cSJeff Roberson * the next thread to run. This is only used for KSE. 170708c9a16cSJeff Roberson */ 170808c9a16cSJeff Roberson static void 170908c9a16cSJeff Roberson sched_switchin(struct tdq *tdq, struct thread *td) 171008c9a16cSJeff Roberson { 171108c9a16cSJeff Roberson #ifdef SMP 171208c9a16cSJeff Roberson spinlock_enter(); 171308c9a16cSJeff Roberson TDQ_UNLOCK(tdq); 171408c9a16cSJeff Roberson thread_lock(td); 171508c9a16cSJeff Roberson spinlock_exit(); 171608c9a16cSJeff Roberson sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING); 171708c9a16cSJeff Roberson #else 171808c9a16cSJeff Roberson td->td_lock = TDQ_LOCKPTR(tdq); 171908c9a16cSJeff Roberson #endif 172008c9a16cSJeff Roberson tdq_add(tdq, td, SRQ_YIELDING); 172108c9a16cSJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 172208c9a16cSJeff Roberson } 172308c9a16cSJeff Roberson 172408c9a16cSJeff Roberson /* 1725c47f202bSJeff Roberson * Handle migration from sched_switch(). This happens only for 1726c47f202bSJeff Roberson * cpu binding. 1727c47f202bSJeff Roberson */ 1728c47f202bSJeff Roberson static struct mtx * 1729c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 1730c47f202bSJeff Roberson { 1731c47f202bSJeff Roberson struct tdq *tdn; 1732c47f202bSJeff Roberson 1733c47f202bSJeff Roberson tdn = TDQ_CPU(td->td_sched->ts_cpu); 1734c47f202bSJeff Roberson #ifdef SMP 1735c47f202bSJeff Roberson /* 1736c47f202bSJeff Roberson * Do the lock dance required to avoid LOR. 
We grab an extra 1737c47f202bSJeff Roberson * spinlock nesting to prevent preemption while we're 1738c47f202bSJeff Roberson * not holding either run-queue lock. 1739c47f202bSJeff Roberson */ 1740c47f202bSJeff Roberson spinlock_enter(); 1741c47f202bSJeff Roberson thread_block_switch(td); /* This releases the lock on tdq. */ 1742c47f202bSJeff Roberson TDQ_LOCK(tdn); 1743c47f202bSJeff Roberson tdq_add(tdn, td, flags); 1744c47f202bSJeff Roberson tdq_notify(td->td_sched); 1745c47f202bSJeff Roberson /* 1746c47f202bSJeff Roberson * After we unlock tdn the new cpu still can't switch into this 1747c47f202bSJeff Roberson * thread until we've unblocked it in cpu_switch(). The lock 1748c47f202bSJeff Roberson * pointers may match in the case of HTT cores. Don't unlock here 1749c47f202bSJeff Roberson * or we can deadlock when the other CPU runs the IPI handler. 1750c47f202bSJeff Roberson */ 1751c47f202bSJeff Roberson if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) { 1752c47f202bSJeff Roberson TDQ_UNLOCK(tdn); 1753c47f202bSJeff Roberson TDQ_LOCK(tdq); 1754c47f202bSJeff Roberson } 1755c47f202bSJeff Roberson spinlock_exit(); 1756c47f202bSJeff Roberson #endif 1757c47f202bSJeff Roberson return (TDQ_LOCKPTR(tdn)); 1758c47f202bSJeff Roberson } 1759c47f202bSJeff Roberson 1760c47f202bSJeff Roberson /* 1761ae7a6b38SJeff Roberson * Block a thread for switching. Similar to thread_block() but does not 1762ae7a6b38SJeff Roberson * bump the spin count. 1763ae7a6b38SJeff Roberson */ 1764ae7a6b38SJeff Roberson static inline struct mtx * 1765ae7a6b38SJeff Roberson thread_block_switch(struct thread *td) 1766ae7a6b38SJeff Roberson { 1767ae7a6b38SJeff Roberson struct mtx *lock; 1768ae7a6b38SJeff Roberson 1769ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1770ae7a6b38SJeff Roberson lock = td->td_lock; 1771ae7a6b38SJeff Roberson td->td_lock = &blocked_lock; 1772ae7a6b38SJeff Roberson mtx_unlock_spin(lock); 1773ae7a6b38SJeff Roberson 1774ae7a6b38SJeff Roberson return (lock); 1775ae7a6b38SJeff Roberson } 1776ae7a6b38SJeff Roberson 1777ae7a6b38SJeff Roberson /* 1778ae7a6b38SJeff Roberson * Release a thread that was blocked with thread_block_switch(). 1779ae7a6b38SJeff Roberson */ 1780ae7a6b38SJeff Roberson static inline void 1781ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx) 1782ae7a6b38SJeff Roberson { 1783ae7a6b38SJeff Roberson atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 1784ae7a6b38SJeff Roberson (uintptr_t)mtx); 1785ae7a6b38SJeff Roberson } 1786ae7a6b38SJeff Roberson 1787ae7a6b38SJeff Roberson /* 1788ae7a6b38SJeff Roberson * Switch threads. This function has to handle threads coming in while 1789ae7a6b38SJeff Roberson * blocked for some reason, running, or idle. It also must deal with 1790ae7a6b38SJeff Roberson * migrating a thread from one queue to another as running threads may 1791ae7a6b38SJeff Roberson * be assigned elsewhere via binding. 
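 *
 * In outline: an idle thread is simply reset to CAN_RUN; a still-running
 * thread is requeued locally, or handed to sched_switch_migrate() when
 * its ts_cpu points elsewhere; anything else is presumed to be going to
 * sleep, so it is blocked and its load removed.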
1792ae7a6b38SJeff Roberson */ 17933db720fdSDavid Xu void 17943389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags) 179535e6168fSJeff Roberson { 1796c02bbb43SJeff Roberson struct tdq *tdq; 1797ad1e7d28SJulian Elischer struct td_sched *ts; 1798ae7a6b38SJeff Roberson struct mtx *mtx; 1799c47f202bSJeff Roberson int srqflag; 1800ae7a6b38SJeff Roberson int cpuid; 180135e6168fSJeff Roberson 18027b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 180335e6168fSJeff Roberson 1804ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1805ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1806e7d50326SJeff Roberson ts = td->td_sched; 1807c47f202bSJeff Roberson mtx = td->td_lock; 1808ae7a6b38SJeff Roberson #ifdef SMP 1809ae7a6b38SJeff Roberson ts->ts_rltick = ticks; 1810ae7a6b38SJeff Roberson if (newtd && newtd->td_priority < tdq->tdq_lowpri) 1811ae7a6b38SJeff Roberson tdq->tdq_lowpri = newtd->td_priority; 1812ae7a6b38SJeff Roberson #endif 1813060563ecSJulian Elischer td->td_lastcpu = td->td_oncpu; 1814060563ecSJulian Elischer td->td_oncpu = NOCPU; 181552eb8464SJohn Baldwin td->td_flags &= ~TDF_NEEDRESCHED; 181677918643SStephan Uphoff td->td_owepreempt = 0; 1817b11fdad0SJeff Roberson /* 1818ae7a6b38SJeff Roberson * The lock pointer in an idle thread should never change. Reset it 1819ae7a6b38SJeff Roberson * to CAN_RUN as well. 1820b11fdad0SJeff Roberson */ 1821486a9414SJulian Elischer if (TD_IS_IDLETHREAD(td)) { 1822ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1823bf0acc27SJohn Baldwin TD_SET_CAN_RUN(td); 18247b20fb19SJeff Roberson } else if (TD_IS_RUNNING(td)) { 1825ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 18267b20fb19SJeff Roberson tdq_load_rem(tdq, ts); 1827c47f202bSJeff Roberson srqflag = (flags & SW_PREEMPT) ? 1828598b368dSJeff Roberson SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1829c47f202bSJeff Roberson SRQ_OURSELF|SRQ_YIELDING; 1830c47f202bSJeff Roberson if (ts->ts_cpu == cpuid) 1831c47f202bSJeff Roberson tdq_add(tdq, td, srqflag); 1832c47f202bSJeff Roberson else 1833c47f202bSJeff Roberson mtx = sched_switch_migrate(tdq, td, srqflag); 1834ae7a6b38SJeff Roberson } else { 1835ae7a6b38SJeff Roberson /* This thread must be going to sleep. */ 1836ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1837ae7a6b38SJeff Roberson mtx = thread_block_switch(td); 1838ae7a6b38SJeff Roberson tdq_load_rem(tdq, ts); 1839ae7a6b38SJeff Roberson } 1840ae7a6b38SJeff Roberson /* 1841ae7a6b38SJeff Roberson * We enter here with the thread blocked and assigned to the 1842ae7a6b38SJeff Roberson * appropriate cpu run-queue or sleep-queue and with the current 1843ae7a6b38SJeff Roberson * thread-queue locked. 1844ae7a6b38SJeff Roberson */ 1845ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 1846ae7a6b38SJeff Roberson /* 184708c9a16cSJeff Roberson * If KSE assigned a new thread just add it here and let choosethread 184808c9a16cSJeff Roberson * select the best one. 1849ae7a6b38SJeff Roberson */ 185008c9a16cSJeff Roberson if (newtd != NULL) 185108c9a16cSJeff Roberson sched_switchin(tdq, newtd); 18522454aaf5SJeff Roberson newtd = choosethread(); 1853ae7a6b38SJeff Roberson /* 1854ae7a6b38SJeff Roberson * Call the MD code to switch contexts if necessary. 
1855ae7a6b38SJeff Roberson */ 1856ebccf1e3SJoseph Koshy if (td != newtd) { 1857ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1858ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1859ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1860ebccf1e3SJoseph Koshy #endif 1861ae7a6b38SJeff Roberson cpu_switch(td, newtd, mtx); 1862ae7a6b38SJeff Roberson /* 1863ae7a6b38SJeff Roberson * We may return from cpu_switch on a different cpu. However, 1864ae7a6b38SJeff Roberson * we always return with td_lock pointing to the current cpu's 1865ae7a6b38SJeff Roberson * run queue lock. 1866ae7a6b38SJeff Roberson */ 1867ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1868ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1869ae7a6b38SJeff Roberson TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td; 1870ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1871ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1872ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1873ebccf1e3SJoseph Koshy #endif 1874ae7a6b38SJeff Roberson } else 1875ae7a6b38SJeff Roberson thread_unblock_switch(td, mtx); 1876ae7a6b38SJeff Roberson /* 1877ae7a6b38SJeff Roberson * Assert that all went well and return. 1878ae7a6b38SJeff Roberson */ 1879ae7a6b38SJeff Roberson #ifdef SMP 1880ae7a6b38SJeff Roberson /* We should always get here with the lowest priority td possible */ 1881ae7a6b38SJeff Roberson tdq->tdq_lowpri = td->td_priority; 1882ae7a6b38SJeff Roberson #endif 1883ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); 1884ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1885ae7a6b38SJeff Roberson td->td_oncpu = cpuid; 188635e6168fSJeff Roberson } 188735e6168fSJeff Roberson 1888ae7a6b38SJeff Roberson /* 1889ae7a6b38SJeff Roberson * Adjust thread priorities as a result of a nice request. 1890ae7a6b38SJeff Roberson */ 189135e6168fSJeff Roberson void 1892fa885116SJulian Elischer sched_nice(struct proc *p, int nice) 189335e6168fSJeff Roberson { 189435e6168fSJeff Roberson struct thread *td; 189535e6168fSJeff Roberson 1896fa885116SJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 18977b20fb19SJeff Roberson PROC_SLOCK_ASSERT(p, MA_OWNED); 1898e7d50326SJeff Roberson 1899fa885116SJulian Elischer p->p_nice = nice; 19008460a577SJohn Birrell FOREACH_THREAD_IN_PROC(p, td) { 19017b20fb19SJeff Roberson thread_lock(td); 19028460a577SJohn Birrell sched_priority(td); 1903e7d50326SJeff Roberson sched_prio(td, td->td_base_user_pri); 19047b20fb19SJeff Roberson thread_unlock(td); 190535e6168fSJeff Roberson } 1906fa885116SJulian Elischer } 190735e6168fSJeff Roberson 1908ae7a6b38SJeff Roberson /* 1909ae7a6b38SJeff Roberson * Record the sleep time for the interactivity scorer. 1910ae7a6b38SJeff Roberson */ 191135e6168fSJeff Roberson void 191244f3b092SJohn Baldwin sched_sleep(struct thread *td) 191335e6168fSJeff Roberson { 1914e7d50326SJeff Roberson 19157b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 191635e6168fSJeff Roberson 1917ae7a6b38SJeff Roberson td->td_sched->ts_slptick = ticks; 191835e6168fSJeff Roberson } 191935e6168fSJeff Roberson 1920ae7a6b38SJeff Roberson /* 1921ae7a6b38SJeff Roberson * Schedule a thread to resume execution and record how long it voluntarily 1922ae7a6b38SJeff Roberson * slept. We also update the pctcpu, interactivity, and priority. 
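 * A sketch of the credit computed below follows; the helper is the
 * editor's illustration, not kernel code.
 */

/*
 * Editor's sketch: sleep time is credited in the same fixed-point units
 * as run time, i.e. hz ticks scaled up by SCHED_TICK_SHIFT.  This
 * mirrors the hzticks computation in sched_wakeup() below.
 */
static inline u_int
example_sleep_credit(int now_ticks, int slptick)
{

	return ((u_int)(now_ticks - slptick) << SCHED_TICK_SHIFT);
}

/*
 * Schedule a thread to resume execution and record how long it
 * voluntarily slept.  We also update the pctcpu, interactivity, and
 * priority.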
1923ae7a6b38SJeff Roberson */ 192435e6168fSJeff Roberson void 192535e6168fSJeff Roberson sched_wakeup(struct thread *td) 192635e6168fSJeff Roberson { 192714618990SJeff Roberson struct td_sched *ts; 1928ae7a6b38SJeff Roberson int slptick; 1929e7d50326SJeff Roberson 19307b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 193114618990SJeff Roberson ts = td->td_sched; 193235e6168fSJeff Roberson /* 1933e7d50326SJeff Roberson * If we slept for more than a tick, update our interactivity and 1934e7d50326SJeff Roberson * priority. 193535e6168fSJeff Roberson */ 1936ae7a6b38SJeff Roberson slptick = ts->ts_slptick; 1937ae7a6b38SJeff Roberson ts->ts_slptick = 0; 1938ae7a6b38SJeff Roberson if (slptick && slptick != ticks) { 19399a93305aSJeff Roberson u_int hzticks; 1940f1e8dc4aSJeff Roberson 1941ae7a6b38SJeff Roberson hzticks = (ticks - slptick) << SCHED_TICK_SHIFT; 1942ae7a6b38SJeff Roberson ts->ts_slptime += hzticks; 19438460a577SJohn Birrell sched_interact_update(td); 194414618990SJeff Roberson sched_pctcpu_update(ts); 19458460a577SJohn Birrell sched_priority(td); 1946f1e8dc4aSJeff Roberson } 194714618990SJeff Roberson /* Reset the slice value after we sleep. */ 194814618990SJeff Roberson ts->ts_slice = sched_slice; 19497a5e5e2aSJeff Roberson sched_add(td, SRQ_BORING); 195035e6168fSJeff Roberson } 195135e6168fSJeff Roberson 195235e6168fSJeff Roberson /* 195335e6168fSJeff Roberson * Penalize the parent for creating a new child and initialize the child's 195435e6168fSJeff Roberson * priority. 195535e6168fSJeff Roberson */ 195635e6168fSJeff Roberson void 19578460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child) 195815dc847eSJeff Roberson { 19597b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1960ad1e7d28SJulian Elischer sched_fork_thread(td, child); 1961e7d50326SJeff Roberson /* 1962e7d50326SJeff Roberson * Penalize the parent and child for forking. 1963e7d50326SJeff Roberson */ 1964e7d50326SJeff Roberson sched_interact_fork(child); 1965e7d50326SJeff Roberson sched_priority(child); 1966ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr; 1967e7d50326SJeff Roberson sched_interact_update(td); 1968e7d50326SJeff Roberson sched_priority(td); 1969ad1e7d28SJulian Elischer } 1970ad1e7d28SJulian Elischer 1971ae7a6b38SJeff Roberson /* 1972ae7a6b38SJeff Roberson * Fork a new thread; it may be within the same process. 1973ae7a6b38SJeff Roberson */ 1974ad1e7d28SJulian Elischer void 1975ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child) 1976ad1e7d28SJulian Elischer { 1977ad1e7d28SJulian Elischer struct td_sched *ts; 1978ad1e7d28SJulian Elischer struct td_sched *ts2; 19798460a577SJohn Birrell 1980e7d50326SJeff Roberson /* 1981e7d50326SJeff Roberson * Initialize child. 1982e7d50326SJeff Roberson */ 19837b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1984ed062c8dSJulian Elischer sched_newthread(child); 1985ae7a6b38SJeff Roberson child->td_lock = TDQ_LOCKPTR(TDQ_SELF()); 1986ad1e7d28SJulian Elischer ts = td->td_sched; 1987ad1e7d28SJulian Elischer ts2 = child->td_sched; 1988ad1e7d28SJulian Elischer ts2->ts_cpu = ts->ts_cpu; 1989ad1e7d28SJulian Elischer ts2->ts_runq = NULL; 1990e7d50326SJeff Roberson /* 1991e7d50326SJeff Roberson * Grab our parent's cpu estimation information and priority.
1992e7d50326SJeff Roberson */ 1993ad1e7d28SJulian Elischer ts2->ts_ticks = ts->ts_ticks; 1994ad1e7d28SJulian Elischer ts2->ts_ltick = ts->ts_ltick; 1995ad1e7d28SJulian Elischer ts2->ts_ftick = ts->ts_ftick; 1996e7d50326SJeff Roberson child->td_user_pri = td->td_user_pri; 1997e7d50326SJeff Roberson child->td_base_user_pri = td->td_base_user_pri; 1998e7d50326SJeff Roberson /* 1999e7d50326SJeff Roberson * And update the interactivity score. 2000e7d50326SJeff Roberson */ 2001ae7a6b38SJeff Roberson ts2->ts_slptime = ts->ts_slptime; 2002ae7a6b38SJeff Roberson ts2->ts_runtime = ts->ts_runtime; 2003e7d50326SJeff Roberson ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ 200415dc847eSJeff Roberson } 200515dc847eSJeff Roberson 2006ae7a6b38SJeff Roberson /* 2007ae7a6b38SJeff Roberson * Adjust the priority class of a thread. 2008ae7a6b38SJeff Roberson */ 200915dc847eSJeff Roberson void 20108460a577SJohn Birrell sched_class(struct thread *td, int class) 201115dc847eSJeff Roberson { 201215dc847eSJeff Roberson 20137b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 20148460a577SJohn Birrell if (td->td_pri_class == class) 201515dc847eSJeff Roberson return; 201615dc847eSJeff Roberson 2017ef1134c9SJeff Roberson #ifdef SMP 2018155b9987SJeff Roberson /* 2019155b9987SJeff Roberson * On SMP if we're on the RUNQ we must adjust the transferable 2020155b9987SJeff Roberson * count because we could be changing to or from an interrupt 2021155b9987SJeff Roberson * class. 2022155b9987SJeff Roberson */ 20237a5e5e2aSJeff Roberson if (TD_ON_RUNQ(td)) { 20241e516cf5SJeff Roberson struct tdq *tdq; 20251e516cf5SJeff Roberson 20261e516cf5SJeff Roberson tdq = TDQ_CPU(td->td_sched->ts_cpu); 20271e516cf5SJeff Roberson if (THREAD_CAN_MIGRATE(td)) { 2028d2ad694cSJeff Roberson tdq->tdq_transferable--; 2029d2ad694cSJeff Roberson tdq->tdq_group->tdg_transferable--; 203080f86c9fSJeff Roberson } 20311e516cf5SJeff Roberson td->td_pri_class = class; 20321e516cf5SJeff Roberson if (THREAD_CAN_MIGRATE(td)) { 2033d2ad694cSJeff Roberson tdq->tdq_transferable++; 2034d2ad694cSJeff Roberson tdq->tdq_group->tdg_transferable++; 203580f86c9fSJeff Roberson } 2036155b9987SJeff Roberson } 2037ef1134c9SJeff Roberson #endif 20388460a577SJohn Birrell td->td_pri_class = class; 203935e6168fSJeff Roberson } 204035e6168fSJeff Roberson 204135e6168fSJeff Roberson /* 204235e6168fSJeff Roberson * Return some of the child's priority and interactivity to the parent. 204335e6168fSJeff Roberson */ 204435e6168fSJeff Roberson void 2045fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child) 204635e6168fSJeff Roberson { 2047e7d50326SJeff Roberson struct thread *td; 2048141ad61cSJeff Roberson 20498460a577SJohn Birrell CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 2050fc6c30f6SJulian Elischer child, child->td_proc->p_comm, child->td_priority); 20518460a577SJohn Birrell 20527b20fb19SJeff Roberson PROC_SLOCK_ASSERT(p, MA_OWNED); 2053e7d50326SJeff Roberson td = FIRST_THREAD_IN_PROC(p); 2054e7d50326SJeff Roberson sched_exit_thread(td, child); 2055ad1e7d28SJulian Elischer } 2056ad1e7d28SJulian Elischer 2057ae7a6b38SJeff Roberson /* 2058ae7a6b38SJeff Roberson * Penalize another thread for the time spent on this one. This helps to 2059ae7a6b38SJeff Roberson * worsen the priority and interactivity of processes which schedule batch 2060ae7a6b38SJeff Roberson * jobs such as make. This has little effect on the make process itself but 2061ae7a6b38SJeff Roberson * causes new processes spawned by it to receive worse scores immediately.
2062ae7a6b38SJeff Roberson */ 2063ad1e7d28SJulian Elischer void 2064fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child) 2065ad1e7d28SJulian Elischer { 2066fc6c30f6SJulian Elischer 2067e7d50326SJeff Roberson CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 2068e7d50326SJeff Roberson child, child->td_proc->p_comm, child->td_priority); 2069e7d50326SJeff Roberson 2070e7d50326SJeff Roberson #ifdef KSE 2071e7d50326SJeff Roberson /* 2072e7d50326SJeff Roberson * KSE forks and exits so often that this penalty causes short-lived 2073e7d50326SJeff Roberson * threads to always be non-interactive. This causes mozilla to 2074e7d50326SJeff Roberson * crawl under load. 2075e7d50326SJeff Roberson */ 2076e7d50326SJeff Roberson if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc) 2077e7d50326SJeff Roberson return; 2078e7d50326SJeff Roberson #endif 2079e7d50326SJeff Roberson /* 2080e7d50326SJeff Roberson * Give the child's runtime to the parent without returning the 2081e7d50326SJeff Roberson * sleep time as a penalty to the parent. This causes shells that 2082e7d50326SJeff Roberson * launch expensive things to mark their children as expensive. 2083e7d50326SJeff Roberson */ 20847b20fb19SJeff Roberson thread_lock(td); 2085ae7a6b38SJeff Roberson td->td_sched->ts_runtime += child->td_sched->ts_runtime; 2086fc6c30f6SJulian Elischer sched_interact_update(td); 2087e7d50326SJeff Roberson sched_priority(td); 20887b20fb19SJeff Roberson thread_unlock(td); 2089ad1e7d28SJulian Elischer } 2090ad1e7d28SJulian Elischer 2091ae7a6b38SJeff Roberson /* 2092ae7a6b38SJeff Roberson * Fix priorities on return to user-space. Priorities may be elevated due 2093ae7a6b38SJeff Roberson * to static priorities in msleep() or similar. 2094ae7a6b38SJeff Roberson */ 2095ad1e7d28SJulian Elischer void 2096ad1e7d28SJulian Elischer sched_userret(struct thread *td) 2097ad1e7d28SJulian Elischer { 2098ad1e7d28SJulian Elischer /* 2099ad1e7d28SJulian Elischer * XXX we cheat slightly on the locking here to avoid locking in 2100ad1e7d28SJulian Elischer * the usual case. Setting td_priority here is essentially an 2101ad1e7d28SJulian Elischer * incomplete workaround for not setting it properly elsewhere. 2102ad1e7d28SJulian Elischer * Now that some interrupt handlers are threads, not setting it 2103ad1e7d28SJulian Elischer * properly elsewhere can clobber it in the window between setting 2104ad1e7d28SJulian Elischer * it here and returning to user mode, so don't waste time setting 2105ad1e7d28SJulian Elischer * it perfectly here. 2106ad1e7d28SJulian Elischer */ 2107ad1e7d28SJulian Elischer KASSERT((td->td_flags & TDF_BORROWING) == 0, 2108ad1e7d28SJulian Elischer ("thread with borrowed priority returning to userland")); 2109ad1e7d28SJulian Elischer if (td->td_priority != td->td_user_pri) { 21107b20fb19SJeff Roberson thread_lock(td); 2111ad1e7d28SJulian Elischer td->td_priority = td->td_user_pri; 2112ad1e7d28SJulian Elischer td->td_base_pri = td->td_user_pri; 21137b20fb19SJeff Roberson thread_unlock(td); 2114ad1e7d28SJulian Elischer } 211535e6168fSJeff Roberson } 211635e6168fSJeff Roberson 2117ae7a6b38SJeff Roberson /* 2118ae7a6b38SJeff Roberson * Handle a stathz tick. This is really only relevant for timeshare 2119ae7a6b38SJeff Roberson * threads. 
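 *
 * Per tick, the work below is: rotate the timeshare insert index so
 * every bucket eventually drains, charge tickincr of run time toward
 * the interactivity score, and request a reschedule once ts_slice is
 * exhausted.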
2120ae7a6b38SJeff Roberson */ 212135e6168fSJeff Roberson void 21227cf90fb3SJeff Roberson sched_clock(struct thread *td) 212335e6168fSJeff Roberson { 2124ad1e7d28SJulian Elischer struct tdq *tdq; 2125ad1e7d28SJulian Elischer struct td_sched *ts; 212635e6168fSJeff Roberson 2127ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 21283f872f85SJeff Roberson tdq = TDQ_SELF(); 21293f872f85SJeff Roberson /* 21303f872f85SJeff Roberson * Advance the insert index once for each tick to ensure that all 21313f872f85SJeff Roberson * threads get a chance to run. 21323f872f85SJeff Roberson */ 21333f872f85SJeff Roberson if (tdq->tdq_idx == tdq->tdq_ridx) { 21343f872f85SJeff Roberson tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; 21353f872f85SJeff Roberson if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) 21363f872f85SJeff Roberson tdq->tdq_ridx = tdq->tdq_idx; 21373f872f85SJeff Roberson } 21383f872f85SJeff Roberson ts = td->td_sched; 21393f741ca1SJeff Roberson /* 21408460a577SJohn Birrell * We only run the slice-accounting code for TIMESHARE threads. 2141a8949de2SJeff Roberson */ 21428460a577SJohn Birrell if (td->td_pri_class != PRI_TIMESHARE) 2143a8949de2SJeff Roberson return; 2144a8949de2SJeff Roberson /* 21453f872f85SJeff Roberson * We used a tick; charge it to the thread so that we can compute our 214615dc847eSJeff Roberson * interactivity. 214715dc847eSJeff Roberson */ 2148ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr; 21498460a577SJohn Birrell sched_interact_update(td); 215035e6168fSJeff Roberson /* 215135e6168fSJeff Roberson * We used up one time slice. 215235e6168fSJeff Roberson */ 2153ad1e7d28SJulian Elischer if (--ts->ts_slice > 0) 215415dc847eSJeff Roberson return; 215535e6168fSJeff Roberson /* 215615dc847eSJeff Roberson * We're out of time; recompute priorities and requeue. 215735e6168fSJeff Roberson */ 21588460a577SJohn Birrell sched_priority(td); 21594a338afdSJulian Elischer td->td_flags |= TDF_NEEDRESCHED; 216035e6168fSJeff Roberson } 216135e6168fSJeff Roberson 2162ae7a6b38SJeff Roberson /* 2163ae7a6b38SJeff Roberson * Called once per hz tick. Used for cpu utilization information. This 2164ae7a6b38SJeff Roberson * is easier than trying to scale based on stathz. 2165ae7a6b38SJeff Roberson */ 2166ae7a6b38SJeff Roberson void 2167ae7a6b38SJeff Roberson sched_tick(void) 2168ae7a6b38SJeff Roberson { 2169ae7a6b38SJeff Roberson struct td_sched *ts; 2170ae7a6b38SJeff Roberson 2171ae7a6b38SJeff Roberson ts = curthread->td_sched; 2172ae7a6b38SJeff Roberson /* Adjust ticks for pctcpu */ 2173ae7a6b38SJeff Roberson ts->ts_ticks += 1 << SCHED_TICK_SHIFT; 2174ae7a6b38SJeff Roberson ts->ts_ltick = ticks; 2175ae7a6b38SJeff Roberson /* 2176ae7a6b38SJeff Roberson * Update if we've exceeded our desired tick threshold by over one 2177ae7a6b38SJeff Roberson * second. 2178ae7a6b38SJeff Roberson */ 2179ae7a6b38SJeff Roberson if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick) 2180ae7a6b38SJeff Roberson sched_pctcpu_update(ts); 2181ae7a6b38SJeff Roberson } 2182ae7a6b38SJeff Roberson 2183ae7a6b38SJeff Roberson /* 2184ae7a6b38SJeff Roberson * Return whether the current CPU has runnable tasks. Used for in-kernel 2185ae7a6b38SJeff Roberson * cooperative idle threads.
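 *
 * Note the asymmetry below: the idle thread checks for any load at all,
 * while every other caller first discounts itself from tdq_load.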
2186ae7a6b38SJeff Roberson */ 218735e6168fSJeff Roberson int 218835e6168fSJeff Roberson sched_runnable(void) 218935e6168fSJeff Roberson { 2190ad1e7d28SJulian Elischer struct tdq *tdq; 2191b90816f1SJeff Roberson int load; 219235e6168fSJeff Roberson 2193b90816f1SJeff Roberson load = 1; 2194b90816f1SJeff Roberson 2195ad1e7d28SJulian Elischer tdq = TDQ_SELF(); 21963f741ca1SJeff Roberson if ((curthread->td_flags & TDF_IDLETD) != 0) { 2197d2ad694cSJeff Roberson if (tdq->tdq_load > 0) 21983f741ca1SJeff Roberson goto out; 21993f741ca1SJeff Roberson } else 2200d2ad694cSJeff Roberson if (tdq->tdq_load - 1 > 0) 2201b90816f1SJeff Roberson goto out; 2202b90816f1SJeff Roberson load = 0; 2203b90816f1SJeff Roberson out: 2204b90816f1SJeff Roberson return (load); 220535e6168fSJeff Roberson } 220635e6168fSJeff Roberson 2207ae7a6b38SJeff Roberson /* 2208ae7a6b38SJeff Roberson * Choose the highest priority thread to run. The thread is removed from 2209ae7a6b38SJeff Roberson * the run-queue while it runs; however, the load remains. For SMP we set 2210ae7a6b38SJeff Roberson * the tdq's bit in the global idle bitmask if it idles here. 2211ae7a6b38SJeff Roberson */ 22127a5e5e2aSJeff Roberson struct thread * 2213c9f25d8fSJeff Roberson sched_choose(void) 2214c9f25d8fSJeff Roberson { 221515dc847eSJeff Roberson #ifdef SMP 2216ae7a6b38SJeff Roberson struct tdq_group *tdg; 221715dc847eSJeff Roberson #endif 2218ae7a6b38SJeff Roberson struct td_sched *ts; 2219ae7a6b38SJeff Roberson struct tdq *tdq; 2220ae7a6b38SJeff Roberson 2221ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2222ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2223ad1e7d28SJulian Elischer ts = tdq_choose(tdq); 2224ad1e7d28SJulian Elischer if (ts) { 2225ad1e7d28SJulian Elischer tdq_runq_rem(tdq, ts); 22267a5e5e2aSJeff Roberson return (ts->ts_thread); 222735e6168fSJeff Roberson } 2228c9f25d8fSJeff Roberson #ifdef SMP 2229ae7a6b38SJeff Roberson /* 2230ae7a6b38SJeff Roberson * We only set the idled bit when all of the cpus in the group are 2231ae7a6b38SJeff Roberson * idle. Otherwise we could get into a situation where a thread bounces 2232ae7a6b38SJeff Roberson * back and forth between two idle cores on separate physical CPUs. 2233ae7a6b38SJeff Roberson */ 2234ae7a6b38SJeff Roberson tdg = tdq->tdq_group; 2235ae7a6b38SJeff Roberson tdg->tdg_idlemask |= PCPU_GET(cpumask); 2236ae7a6b38SJeff Roberson if (tdg->tdg_idlemask == tdg->tdg_cpumask) 2237ae7a6b38SJeff Roberson atomic_set_int(&tdq_idle, tdg->tdg_mask); 2238ae7a6b38SJeff Roberson tdq->tdq_lowpri = PRI_MAX_IDLE; 2239c9f25d8fSJeff Roberson #endif 22407a5e5e2aSJeff Roberson return (PCPU_GET(idlethread)); 22417a5e5e2aSJeff Roberson } 22427a5e5e2aSJeff Roberson 2243ae7a6b38SJeff Roberson /* 2244ae7a6b38SJeff Roberson * Set owepreempt if necessary. Preemption never happens directly in ULE; 2245ae7a6b38SJeff Roberson * we always request it once we exit a critical section.
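 *
 * Concretely, td_owepreempt is only set below when the new thread beats
 * curthread's priority, the system is fully up and not panicking,
 * curthread is not already inhibited, and either the new priority
 * clears preempt_thresh or curthread is an idle-class thread.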
2246ae7a6b38SJeff Roberson */ 2247ae7a6b38SJeff Roberson static inline void 2248ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td) 22497a5e5e2aSJeff Roberson { 22507a5e5e2aSJeff Roberson struct thread *ctd; 22517a5e5e2aSJeff Roberson int cpri; 22527a5e5e2aSJeff Roberson int pri; 22537a5e5e2aSJeff Roberson 22547a5e5e2aSJeff Roberson ctd = curthread; 22557a5e5e2aSJeff Roberson pri = td->td_priority; 22567a5e5e2aSJeff Roberson cpri = ctd->td_priority; 2257ae7a6b38SJeff Roberson if (pri < cpri) 2258ae7a6b38SJeff Roberson ctd->td_flags |= TDF_NEEDRESCHED; 22597a5e5e2aSJeff Roberson if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd)) 2260ae7a6b38SJeff Roberson return; 22617a5e5e2aSJeff Roberson /* 22627a5e5e2aSJeff Roberson * Always preempt IDLE-class threads. Otherwise preempt only when 22637a5e5e2aSJeff Roberson * the queued thread's priority is at or below preempt_thresh. 22647a5e5e2aSJeff Roberson */ 2265ae7a6b38SJeff Roberson if (pri > preempt_thresh && cpri < PRI_MIN_IDLE) 2266ae7a6b38SJeff Roberson return; 22677a5e5e2aSJeff Roberson ctd->td_owepreempt = 1; 2268ae7a6b38SJeff Roberson return; 226935e6168fSJeff Roberson } 227035e6168fSJeff Roberson 2271ae7a6b38SJeff Roberson /* 2272ae7a6b38SJeff Roberson * Add a thread to a thread queue: set its state and slice, pick the 2273ae7a6b38SJeff Roberson * run-queue by priority, and enqueue it. This is the internal function 2274ae7a6b38SJeff Roberson * called when the tdq is predetermined. 2275ae7a6b38SJeff Roberson */ 227635e6168fSJeff Roberson void 2277ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags) 227835e6168fSJeff Roberson { 2279ad1e7d28SJulian Elischer struct td_sched *ts; 228022bf7d9aSJeff Roberson int class; 22817b8bfa0dSJeff Roberson #ifdef SMP 22827b8bfa0dSJeff Roberson int cpumask; 22837b8bfa0dSJeff Roberson #endif 2284c9f25d8fSJeff Roberson 2285ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 22867a5e5e2aSJeff Roberson KASSERT((td->td_inhibitors == 0), 22877a5e5e2aSJeff Roberson ("sched_add: trying to run inhibited thread")); 22887a5e5e2aSJeff Roberson KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 22897a5e5e2aSJeff Roberson ("sched_add: bad thread state")); 2290b61ce5b0SJeff Roberson KASSERT(td->td_flags & TDF_INMEM, 2291b61ce5b0SJeff Roberson ("sched_add: thread swapped out")); 2292ae7a6b38SJeff Roberson 2293ae7a6b38SJeff Roberson ts = td->td_sched; 22947a5e5e2aSJeff Roberson class = PRI_BASE(td->td_pri_class); 2295ae7a6b38SJeff Roberson TD_SET_RUNQ(td); 22967a5e5e2aSJeff Roberson if (ts->ts_slice == 0) 22977a5e5e2aSJeff Roberson ts->ts_slice = sched_slice; 22982454aaf5SJeff Roberson /* 2299ae7a6b38SJeff Roberson * Pick the run queue based on priority. 23002454aaf5SJeff Roberson */ 2301ae7a6b38SJeff Roberson if (td->td_priority <= PRI_MAX_REALTIME) 2302ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_realtime; 2303ae7a6b38SJeff Roberson else if (td->td_priority <= PRI_MAX_TIMESHARE) 2304ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_timeshare; 23057b8bfa0dSJeff Roberson else 2306ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_idle; 2307ae7a6b38SJeff Roberson #ifdef SMP 23087b8bfa0dSJeff Roberson cpumask = 1 << ts->ts_cpu; 230922bf7d9aSJeff Roberson /* 2310670c524fSJeff Roberson * If we had been idle, clear our bit in the group and potentially 23117b8bfa0dSJeff Roberson * the global bitmap.
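 * For example, on a fully idle two-cpu group (tdg_idlemask ==
 * tdg_cpumask), waking one cpu first clears the group's bit in the
 * global tdq_idle mask and then clears that cpu's bit in tdg_idlemask.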
231222bf7d9aSJeff Roberson */ 2313e7d50326SJeff Roberson if ((class != PRI_IDLE && class != PRI_ITHD) && 23147b8bfa0dSJeff Roberson (tdq->tdq_group->tdg_idlemask & cpumask) != 0) { 231580f86c9fSJeff Roberson /* 231680f86c9fSJeff Roberson * Check to see if our group is unidling, and if so, remove it 231780f86c9fSJeff Roberson * from the global idle mask. 231880f86c9fSJeff Roberson */ 2319d2ad694cSJeff Roberson if (tdq->tdq_group->tdg_idlemask == 2320d2ad694cSJeff Roberson tdq->tdq_group->tdg_cpumask) 2321d2ad694cSJeff Roberson atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask); 232280f86c9fSJeff Roberson /* 232380f86c9fSJeff Roberson * Now remove ourselves from the group-specific idle mask. 232480f86c9fSJeff Roberson */ 23257b8bfa0dSJeff Roberson tdq->tdq_group->tdg_idlemask &= ~cpumask; 23267b8bfa0dSJeff Roberson } 2327ae7a6b38SJeff Roberson if (td->td_priority < tdq->tdq_lowpri) 2328ae7a6b38SJeff Roberson tdq->tdq_lowpri = td->td_priority; 232922bf7d9aSJeff Roberson #endif 2330ad1e7d28SJulian Elischer tdq_runq_add(tdq, ts, flags); 2331ad1e7d28SJulian Elischer tdq_load_add(tdq, ts); 2332ae7a6b38SJeff Roberson } 2333ae7a6b38SJeff Roberson 2334ae7a6b38SJeff Roberson /* 2335ae7a6b38SJeff Roberson * Select the target thread queue and add a thread to it. Request 2336ae7a6b38SJeff Roberson * preemption or IPI a remote processor if required. 2337ae7a6b38SJeff Roberson */ 2338ae7a6b38SJeff Roberson void 2339ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags) 2340ae7a6b38SJeff Roberson { 2341ae7a6b38SJeff Roberson struct td_sched *ts; 2342ae7a6b38SJeff Roberson struct tdq *tdq; 23437b8bfa0dSJeff Roberson #ifdef SMP 2344ae7a6b38SJeff Roberson int cpuid; 2345ae7a6b38SJeff Roberson int cpu; 2346ae7a6b38SJeff Roberson #endif 2347ae7a6b38SJeff Roberson CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 2348ae7a6b38SJeff Roberson td, td->td_proc->p_comm, td->td_priority, curthread, 2349ae7a6b38SJeff Roberson curthread->td_proc->p_comm); 2350ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2351ae7a6b38SJeff Roberson ts = td->td_sched; 2352ae7a6b38SJeff Roberson /* 2353ae7a6b38SJeff Roberson * Recalculate the priority before we select the target cpu or 2354ae7a6b38SJeff Roberson * run-queue. 2355ae7a6b38SJeff Roberson */ 2356ae7a6b38SJeff Roberson if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 2357ae7a6b38SJeff Roberson sched_priority(td); 2358ae7a6b38SJeff Roberson #ifdef SMP 2359ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 2360ae7a6b38SJeff Roberson /* 2361ae7a6b38SJeff Roberson * Pick the destination cpu; if it isn't ours, transfer the thread 2362ae7a6b38SJeff Roberson * to the target cpu.
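 * Threads at interrupt priority stay on the current cpu, non-migratable
 * threads keep their ts_cpu, and everything else is placed by
 * sched_pickcpu().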
2363ae7a6b38SJeff Roberson */ 2364ae7a6b38SJeff Roberson if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td)) 2365ae7a6b38SJeff Roberson cpu = cpuid; 2366ae7a6b38SJeff Roberson else if (!THREAD_CAN_MIGRATE(td)) 2367ae7a6b38SJeff Roberson cpu = ts->ts_cpu; 2368ae7a6b38SJeff Roberson else 2369ae7a6b38SJeff Roberson cpu = sched_pickcpu(ts, flags); 2370ae7a6b38SJeff Roberson tdq = sched_setcpu(ts, cpu, flags); 2371ae7a6b38SJeff Roberson tdq_add(tdq, td, flags); 2372ae7a6b38SJeff Roberson if (cpu != cpuid) { 23737b8bfa0dSJeff Roberson tdq_notify(ts); 23747b8bfa0dSJeff Roberson return; 23757b8bfa0dSJeff Roberson } 2376ae7a6b38SJeff Roberson #else 2377ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2378ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 2379ae7a6b38SJeff Roberson /* 2380ae7a6b38SJeff Roberson * Now that the thread is moving to the run-queue, set the lock 2381ae7a6b38SJeff Roberson * to the scheduler's lock. 2382ae7a6b38SJeff Roberson */ 2383ae7a6b38SJeff Roberson thread_lock_set(td, TDQ_LOCKPTR(tdq)); 2384ae7a6b38SJeff Roberson tdq_add(tdq, td, flags); 23857b8bfa0dSJeff Roberson #endif 2386ae7a6b38SJeff Roberson if (!(flags & SRQ_YIELDING)) 2387ae7a6b38SJeff Roberson sched_setpreempt(td); 238835e6168fSJeff Roberson } 238935e6168fSJeff Roberson 2390ae7a6b38SJeff Roberson /* 2391ae7a6b38SJeff Roberson * Remove a thread from a run-queue without running it. This is used 2392ae7a6b38SJeff Roberson * when we're stealing a thread from a remote queue. Otherwise all threads 2393ae7a6b38SJeff Roberson * exit by calling sched_exit_thread() and sched_throw() themselves. 2394ae7a6b38SJeff Roberson */ 239535e6168fSJeff Roberson void 23967cf90fb3SJeff Roberson sched_rem(struct thread *td) 239735e6168fSJeff Roberson { 2398ad1e7d28SJulian Elischer struct tdq *tdq; 2399ad1e7d28SJulian Elischer struct td_sched *ts; 24007cf90fb3SJeff Roberson 240181d47d3fSJeff Roberson CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 240281d47d3fSJeff Roberson td, td->td_proc->p_comm, td->td_priority, curthread, 240381d47d3fSJeff Roberson curthread->td_proc->p_comm); 2404ad1e7d28SJulian Elischer ts = td->td_sched; 2405ae7a6b38SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 2406ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2407ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 24087a5e5e2aSJeff Roberson KASSERT(TD_ON_RUNQ(td), 2409ad1e7d28SJulian Elischer ("sched_rem: thread not on run queue")); 2410ad1e7d28SJulian Elischer tdq_runq_rem(tdq, ts); 2411ad1e7d28SJulian Elischer tdq_load_rem(tdq, ts); 24127a5e5e2aSJeff Roberson TD_SET_CAN_RUN(td); 241335e6168fSJeff Roberson } 241435e6168fSJeff Roberson 2415ae7a6b38SJeff Roberson /* 2416ae7a6b38SJeff Roberson * Fetch cpu utilization information. Updates on demand. 2417ae7a6b38SJeff Roberson */ 241835e6168fSJeff Roberson fixpt_t 24197cf90fb3SJeff Roberson sched_pctcpu(struct thread *td) 242035e6168fSJeff Roberson { 242135e6168fSJeff Roberson fixpt_t pctcpu; 2422ad1e7d28SJulian Elischer struct td_sched *ts; 242335e6168fSJeff Roberson 242435e6168fSJeff Roberson pctcpu = 0; 2425ad1e7d28SJulian Elischer ts = td->td_sched; 2426ad1e7d28SJulian Elischer if (ts == NULL) 2427484288deSJeff Roberson return (0); 242835e6168fSJeff Roberson 24297b20fb19SJeff Roberson thread_lock(td); 2430ad1e7d28SJulian Elischer if (ts->ts_ticks) { 243135e6168fSJeff Roberson int rtick; 243235e6168fSJeff Roberson 2433ad1e7d28SJulian Elischer sched_pctcpu_update(ts); 243435e6168fSJeff Roberson /* How many rticks per second?
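	 * E.g. a thread that ran on every hz tick of the averaging window
	 * has SCHED_TICK_HZ(ts) / SCHED_TICK_SECS == hz, so rtick == hz and
	 * the expression below yields pctcpu == FSCALE, i.e. 100%.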
*/ 2435e7d50326SJeff Roberson rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 2436e7d50326SJeff Roberson pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 243735e6168fSJeff Roberson } 2438ad1e7d28SJulian Elischer td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick; 24397b20fb19SJeff Roberson thread_unlock(td); 244035e6168fSJeff Roberson 244135e6168fSJeff Roberson return (pctcpu); 244235e6168fSJeff Roberson } 244335e6168fSJeff Roberson 2444ae7a6b38SJeff Roberson /* 2445ae7a6b38SJeff Roberson * Bind a thread to a target cpu. 2446ae7a6b38SJeff Roberson */ 24479bacd788SJeff Roberson void 24489bacd788SJeff Roberson sched_bind(struct thread *td, int cpu) 24499bacd788SJeff Roberson { 2450ad1e7d28SJulian Elischer struct td_sched *ts; 24519bacd788SJeff Roberson 2452c47f202bSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); 2453ad1e7d28SJulian Elischer ts = td->td_sched; 24546b2f763fSJeff Roberson if (ts->ts_flags & TSF_BOUND) 2455c95d2db2SJeff Roberson sched_unbind(td); 2456ad1e7d28SJulian Elischer ts->ts_flags |= TSF_BOUND; 245780f86c9fSJeff Roberson #ifdef SMP 24586b2f763fSJeff Roberson sched_pin(); 245980f86c9fSJeff Roberson if (PCPU_GET(cpuid) == cpu) 24609bacd788SJeff Roberson return; 24616b2f763fSJeff Roberson ts->ts_cpu = cpu; 24629bacd788SJeff Roberson /* When we return from mi_switch we'll be on the correct cpu. */ 2463279f949eSPoul-Henning Kamp mi_switch(SW_VOL, NULL); 24649bacd788SJeff Roberson #endif 24659bacd788SJeff Roberson } 24669bacd788SJeff Roberson 2467ae7a6b38SJeff Roberson /* 2468ae7a6b38SJeff Roberson * Release a bound thread. 2469ae7a6b38SJeff Roberson */ 24709bacd788SJeff Roberson void 24719bacd788SJeff Roberson sched_unbind(struct thread *td) 24729bacd788SJeff Roberson { 2473e7d50326SJeff Roberson struct td_sched *ts; 2474e7d50326SJeff Roberson 24757b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2476e7d50326SJeff Roberson ts = td->td_sched; 24776b2f763fSJeff Roberson if ((ts->ts_flags & TSF_BOUND) == 0) 24786b2f763fSJeff Roberson return; 2479e7d50326SJeff Roberson ts->ts_flags &= ~TSF_BOUND; 2480e7d50326SJeff Roberson #ifdef SMP 2481e7d50326SJeff Roberson sched_unpin(); 2482e7d50326SJeff Roberson #endif 24839bacd788SJeff Roberson } 24849bacd788SJeff Roberson 248535e6168fSJeff Roberson int 2486ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td) 2487ebccf1e3SJoseph Koshy { 24887b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2489ad1e7d28SJulian Elischer return (td->td_sched->ts_flags & TSF_BOUND); 2490ebccf1e3SJoseph Koshy } 2491ebccf1e3SJoseph Koshy 2492ae7a6b38SJeff Roberson /* 2493ae7a6b38SJeff Roberson * Basic yield call. 2494ae7a6b38SJeff Roberson */ 249536ec198bSDavid Xu void 249636ec198bSDavid Xu sched_relinquish(struct thread *td) 249736ec198bSDavid Xu { 24987b20fb19SJeff Roberson thread_lock(td); 24998460a577SJohn Birrell if (td->td_pri_class == PRI_TIMESHARE) 250036ec198bSDavid Xu sched_prio(td, PRI_MAX_TIMESHARE); 25017b20fb19SJeff Roberson SCHED_STAT_INC(switch_relinquish); 250236ec198bSDavid Xu mi_switch(SW_VOL, NULL); 25037b20fb19SJeff Roberson thread_unlock(td); 250436ec198bSDavid Xu } 250536ec198bSDavid Xu 2506ae7a6b38SJeff Roberson /* 2507ae7a6b38SJeff Roberson * Return the total system load. 
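 * On SMP this sums the load of each cpu group; on UP it is simply the
 * single run-queue's tdq_sysload.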
2508ae7a6b38SJeff Roberson */ 2509ebccf1e3SJoseph Koshy int 251033916c36SJeff Roberson sched_load(void) 251133916c36SJeff Roberson { 251233916c36SJeff Roberson #ifdef SMP 251333916c36SJeff Roberson int total; 251433916c36SJeff Roberson int i; 251533916c36SJeff Roberson 251633916c36SJeff Roberson total = 0; 2517d2ad694cSJeff Roberson for (i = 0; i <= tdg_maxid; i++) 2518d2ad694cSJeff Roberson total += TDQ_GROUP(i)->tdg_load; 251933916c36SJeff Roberson return (total); 252033916c36SJeff Roberson #else 2521d2ad694cSJeff Roberson return (TDQ_SELF()->tdq_sysload); 252233916c36SJeff Roberson #endif 252333916c36SJeff Roberson } 252433916c36SJeff Roberson 252533916c36SJeff Roberson int 252635e6168fSJeff Roberson sched_sizeof_proc(void) 252735e6168fSJeff Roberson { 252835e6168fSJeff Roberson return (sizeof(struct proc)); 252935e6168fSJeff Roberson } 253035e6168fSJeff Roberson 253135e6168fSJeff Roberson int 253235e6168fSJeff Roberson sched_sizeof_thread(void) 253335e6168fSJeff Roberson { 253435e6168fSJeff Roberson return (sizeof(struct thread) + sizeof(struct td_sched)); 253535e6168fSJeff Roberson } 2536b41f1452SDavid Xu 25377a5e5e2aSJeff Roberson /* 25387a5e5e2aSJeff Roberson * The main loop of the per-cpu idle thread. 25397a5e5e2aSJeff Roberson */ 25407a5e5e2aSJeff Roberson void 25417a5e5e2aSJeff Roberson sched_idletd(void *dummy) 25427a5e5e2aSJeff Roberson { 25437a5e5e2aSJeff Roberson struct thread *td; 2544ae7a6b38SJeff Roberson struct tdq *tdq; 25457a5e5e2aSJeff Roberson 25467a5e5e2aSJeff Roberson td = curthread; 2547ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 25487a5e5e2aSJeff Roberson mtx_assert(&Giant, MA_NOTOWNED); 2549ae7a6b38SJeff Roberson /* ULE relies on preemption for idle interruption. */ 2550ae7a6b38SJeff Roberson for (;;) { 2551ae7a6b38SJeff Roberson #ifdef SMP 2552ae7a6b38SJeff Roberson if (tdq_idled(tdq)) 25537a5e5e2aSJeff Roberson cpu_idle(); 2554ae7a6b38SJeff Roberson #else 2555ae7a6b38SJeff Roberson cpu_idle(); 2556ae7a6b38SJeff Roberson #endif 2557ae7a6b38SJeff Roberson } 2558b41f1452SDavid Xu } 2559e7d50326SJeff Roberson 25607b20fb19SJeff Roberson /* 25617b20fb19SJeff Roberson * A CPU is entering the scheduler for the first time or a thread is exiting. 25627b20fb19SJeff Roberson */ 25637b20fb19SJeff Roberson void 25647b20fb19SJeff Roberson sched_throw(struct thread *td) 25657b20fb19SJeff Roberson { 2566ae7a6b38SJeff Roberson struct tdq *tdq; 2567ae7a6b38SJeff Roberson 2568ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 25697b20fb19SJeff Roberson if (td == NULL) { 2570ae7a6b38SJeff Roberson /* Correct spinlock nesting and acquire the correct lock. */ 2571ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 25727b20fb19SJeff Roberson spinlock_exit(); 25737b20fb19SJeff Roberson } else { 2574ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2575ae7a6b38SJeff Roberson tdq_load_rem(tdq, td->td_sched); 25767b20fb19SJeff Roberson } 25777b20fb19SJeff Roberson KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); 25787b20fb19SJeff Roberson PCPU_SET(switchtime, cpu_ticks()); 25797b20fb19SJeff Roberson PCPU_SET(switchticks, ticks); 25807b20fb19SJeff Roberson cpu_throw(td, choosethread()); /* doesn't return */ 25817b20fb19SJeff Roberson } 25827b20fb19SJeff Roberson 2583ae7a6b38SJeff Roberson /* 2584ae7a6b38SJeff Roberson * This is called from fork_exit(). Just acquire the correct locks and 2585ae7a6b38SJeff Roberson * let fork do the rest of the work.
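 * The child arrives here already holding the per-cpu run-queue lock from
 * the context switch; below we record it as the lock owner, set td_oncpu,
 * and assert the lock state before returning to fork_exit().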
2586ae7a6b38SJeff Roberson */ 25877b20fb19SJeff Roberson void 2588fe54587fSJeff Roberson sched_fork_exit(struct thread *td) 25897b20fb19SJeff Roberson { 2590ae7a6b38SJeff Roberson struct td_sched *ts; 2591ae7a6b38SJeff Roberson struct tdq *tdq; 2592ae7a6b38SJeff Roberson int cpuid; 25937b20fb19SJeff Roberson 25947b20fb19SJeff Roberson /* 25957b20fb19SJeff Roberson * Finish setting up thread glue so that it begins execution in a 2596ae7a6b38SJeff Roberson * non-nested critical section with the scheduler lock held. 25977b20fb19SJeff Roberson */ 2598ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 2599ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 2600ae7a6b38SJeff Roberson ts = td->td_sched; 2601ae7a6b38SJeff Roberson if (TD_IS_IDLETHREAD(td)) 2602ae7a6b38SJeff Roberson td->td_lock = TDQ_LOCKPTR(tdq); 2603ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2604ae7a6b38SJeff Roberson td->td_oncpu = cpuid; 2605ae7a6b38SJeff Roberson TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td; 2606fe54587fSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED); 26077b20fb19SJeff Roberson } 26087b20fb19SJeff Roberson 2609ae7a6b38SJeff Roberson static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, 2610ae7a6b38SJeff Roberson "Scheduler"); 2611ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0, 2612e7d50326SJeff Roberson "Scheduler name"); 2613ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, 2614ae7a6b38SJeff Roberson "Slice size for timeshare threads"); 2615ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, 2616ae7a6b38SJeff Roberson "Interactivity score threshold"); 2617ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh, 2618ae7a6b38SJeff Roberson 0, "Minimum priority for preemption; lower priorities have greater precedence"); 26197b8bfa0dSJeff Roberson #ifdef SMP 2620ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0, 2621ae7a6b38SJeff Roberson "Pick the target cpu based on priority rather than load."); 2622ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0, 2623ae7a6b38SJeff Roberson "Number of hz ticks to keep thread affinity for"); 2624ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, ""); 2625ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, 2626ae7a6b38SJeff Roberson "Enables the long-term load balancer"); 262728994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_secs, CTLFLAG_RW, &balance_secs, 0, 262828994a58SJeff Roberson "Average interval in seconds between runs of the long-term balancer"); 2629ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, 2630ae7a6b38SJeff Roberson "Steals work from another hyper-threaded core on idle"); 2631ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0, 2632ae7a6b38SJeff Roberson "Attempts to steal work from other cores before idling"); 263328994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0, 263428994a58SJeff Roberson "Minimum load on remote cpu before we'll steal"); 2635ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0, 2636ae7a6b38SJeff Roberson "True when a topology has been specified by the MD code."); 26377b8bfa0dSJeff Roberson #endif
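
/*
 * The scheduler knobs above are plain integers, so they are easy to
 * inspect from userland.  An illustrative sketch (not part of this
 * file's build; it assumes only the sysctl names registered above)
 * using sysctlbyname(3):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int slice;
 *		size_t len = sizeof(slice);
 *
 *		if (sysctlbyname("kern.sched.slice", &slice, &len,
 *		    NULL, 0) == 0)
 *			printf("timeshare slice: %d ticks\n", slice);
 *		return (0);
 *	}
 */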
2638e7d50326SJeff Roberson 2639e7d50326SJeff Roberson /* ps compat */ 2640e7d50326SJeff Roberson static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ 2641e7d50326SJeff Roberson SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 2642e7d50326SJeff Roberson 2643e7d50326SJeff Roberson 2644ed062c8dSJulian Elischer #define KERN_SWITCH_INCLUDE 1 2645ed062c8dSJulian Elischer #include "kern/kern_switch.c" 2646