135e6168fSJeff Roberson /*- 2e7d50326SJeff Roberson * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org> 335e6168fSJeff Roberson * All rights reserved. 435e6168fSJeff Roberson * 535e6168fSJeff Roberson * Redistribution and use in source and binary forms, with or without 635e6168fSJeff Roberson * modification, are permitted provided that the following conditions 735e6168fSJeff Roberson * are met: 835e6168fSJeff Roberson * 1. Redistributions of source code must retain the above copyright 935e6168fSJeff Roberson * notice unmodified, this list of conditions, and the following 1035e6168fSJeff Roberson * disclaimer. 1135e6168fSJeff Roberson * 2. Redistributions in binary form must reproduce the above copyright 1235e6168fSJeff Roberson * notice, this list of conditions and the following disclaimer in the 1335e6168fSJeff Roberson * documentation and/or other materials provided with the distribution. 1435e6168fSJeff Roberson * 1535e6168fSJeff Roberson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 1635e6168fSJeff Roberson * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 1735e6168fSJeff Roberson * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 1835e6168fSJeff Roberson * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 1935e6168fSJeff Roberson * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 2035e6168fSJeff Roberson * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 2135e6168fSJeff Roberson * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2235e6168fSJeff Roberson * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2335e6168fSJeff Roberson * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 2435e6168fSJeff Roberson * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 2535e6168fSJeff Roberson */ 2635e6168fSJeff Roberson 27ae7a6b38SJeff Roberson /* 28ae7a6b38SJeff Roberson * This file implements the ULE scheduler. ULE supports independent CPU 29ae7a6b38SJeff Roberson * run queues and fine grain locking. It has superior interactive 30ae7a6b38SJeff Roberson * performance under load even on uni-processor systems. 31ae7a6b38SJeff Roberson * 32ae7a6b38SJeff Roberson * etymology: 33a5423ea3SJeff Roberson * ULE is the last three letters in schedule. It owes its name to a 34ae7a6b38SJeff Roberson * generic user created for a scheduling system by Paul Mikesell at 35ae7a6b38SJeff Roberson * Isilon Systems and a general lack of creativity on the part of the author. 36ae7a6b38SJeff Roberson */ 37ae7a6b38SJeff Roberson 38677b542eSDavid E. O'Brien #include <sys/cdefs.h> 39677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 40677b542eSDavid E. 
O'Brien 414da0d332SPeter Wemm #include "opt_hwpmc_hooks.h" 424da0d332SPeter Wemm #include "opt_sched.h" 439923b511SScott Long 4435e6168fSJeff Roberson #include <sys/param.h> 4535e6168fSJeff Roberson #include <sys/systm.h> 462c3490b1SMarcel Moolenaar #include <sys/kdb.h> 4735e6168fSJeff Roberson #include <sys/kernel.h> 4835e6168fSJeff Roberson #include <sys/ktr.h> 4935e6168fSJeff Roberson #include <sys/lock.h> 5035e6168fSJeff Roberson #include <sys/mutex.h> 5135e6168fSJeff Roberson #include <sys/proc.h> 52245f3abfSJeff Roberson #include <sys/resource.h> 539bacd788SJeff Roberson #include <sys/resourcevar.h> 5435e6168fSJeff Roberson #include <sys/sched.h> 5535e6168fSJeff Roberson #include <sys/smp.h> 5635e6168fSJeff Roberson #include <sys/sx.h> 5735e6168fSJeff Roberson #include <sys/sysctl.h> 5835e6168fSJeff Roberson #include <sys/sysproto.h> 59f5c157d9SJohn Baldwin #include <sys/turnstile.h> 603db720fdSDavid Xu #include <sys/umtx.h> 6135e6168fSJeff Roberson #include <sys/vmmeter.h> 6262fa74d9SJeff Roberson #include <sys/cpuset.h> 6335e6168fSJeff Roberson #ifdef KTRACE 6435e6168fSJeff Roberson #include <sys/uio.h> 6535e6168fSJeff Roberson #include <sys/ktrace.h> 6635e6168fSJeff Roberson #endif 6735e6168fSJeff Roberson 68ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 69ebccf1e3SJoseph Koshy #include <sys/pmckern.h> 70ebccf1e3SJoseph Koshy #endif 71ebccf1e3SJoseph Koshy 7235e6168fSJeff Roberson #include <machine/cpu.h> 7322bf7d9aSJeff Roberson #include <machine/smp.h> 7435e6168fSJeff Roberson 75cbdd62adSPeter Grehan #if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__) 7602e2d6b4SJeff Roberson #error "This architecture is not currently compatible with ULE" 777a5e5e2aSJeff Roberson #endif 787a5e5e2aSJeff Roberson 79ae7a6b38SJeff Roberson #define KTR_ULE 0 8014618990SJeff Roberson 816b2f763fSJeff Roberson /* 82ae7a6b38SJeff Roberson * Thread scheduler specific section. All fields are protected 83ae7a6b38SJeff Roberson * by the thread lock. 84ed062c8dSJulian Elischer */ 85ad1e7d28SJulian Elischer struct td_sched { 86ae7a6b38SJeff Roberson TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */ 87ae7a6b38SJeff Roberson struct thread *ts_thread; /* Active associated thread. */ 88ae7a6b38SJeff Roberson struct runq *ts_runq; /* Run-queue we're queued on. */ 89ae7a6b38SJeff Roberson short ts_flags; /* TSF_* flags. */ 90ae7a6b38SJeff Roberson u_char ts_rqindex; /* Run queue index. */ 91ad1e7d28SJulian Elischer u_char ts_cpu; /* CPU that we have affinity for. */ 9273daf66fSJeff Roberson int ts_rltick; /* Real last tick, for affinity. */ 93ae7a6b38SJeff Roberson int ts_slice; /* Ticks of slice remaining. */ 94ae7a6b38SJeff Roberson u_int ts_slptime; /* Number of ticks we vol. slept */ 95ae7a6b38SJeff Roberson u_int ts_runtime; /* Number of ticks we were running */ 96ad1e7d28SJulian Elischer int ts_ltick; /* Last tick that we were running on */ 97ad1e7d28SJulian Elischer int ts_ftick; /* First tick that we were running on */ 98ad1e7d28SJulian Elischer int ts_ticks; /* Tick count */ 99ed062c8dSJulian Elischer }; 100ad1e7d28SJulian Elischer /* flags kept in ts_flags */ 1017b8bfa0dSJeff Roberson #define TSF_BOUND 0x0001 /* Thread can not migrate. */ 1027b8bfa0dSJeff Roberson #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. 
*/ 10335e6168fSJeff Roberson 104ad1e7d28SJulian Elischer static struct td_sched td_sched0; 10535e6168fSJeff Roberson 10662fa74d9SJeff Roberson #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0) 10762fa74d9SJeff Roberson #define THREAD_CAN_SCHED(td, cpu) \ 10862fa74d9SJeff Roberson CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask) 10962fa74d9SJeff Roberson 11035e6168fSJeff Roberson /* 111e7d50326SJeff Roberson * Cpu percentage computation macros and defines. 112e1f89c22SJeff Roberson * 113e7d50326SJeff Roberson * SCHED_TICK_SECS: Number of seconds to average the cpu usage across. 114e7d50326SJeff Roberson * SCHED_TICK_TARG: Number of hz ticks to average the cpu usage across. 1158ab80cf0SJeff Roberson * SCHED_TICK_MAX: Maximum number of ticks before scaling back. 116e7d50326SJeff Roberson * SCHED_TICK_SHIFT: Shift factor to avoid rounding away results. 117e7d50326SJeff Roberson * SCHED_TICK_HZ: Compute the number of hz ticks for a given ticks count. 118e7d50326SJeff Roberson * SCHED_TICK_TOTAL: Gives the amount of time we've been recording ticks. 11935e6168fSJeff Roberson */ 120e7d50326SJeff Roberson #define SCHED_TICK_SECS 10 121e7d50326SJeff Roberson #define SCHED_TICK_TARG (hz * SCHED_TICK_SECS) 1228ab80cf0SJeff Roberson #define SCHED_TICK_MAX (SCHED_TICK_TARG + hz) 123e7d50326SJeff Roberson #define SCHED_TICK_SHIFT 10 124e7d50326SJeff Roberson #define SCHED_TICK_HZ(ts) ((ts)->ts_ticks >> SCHED_TICK_SHIFT) 125eddb4efaSJeff Roberson #define SCHED_TICK_TOTAL(ts) (max((ts)->ts_ltick - (ts)->ts_ftick, hz)) 12635e6168fSJeff Roberson 12735e6168fSJeff Roberson /* 128e7d50326SJeff Roberson * These macros determine priorities for non-interactive threads. They are 129e7d50326SJeff Roberson * assigned a priority based on their recent cpu utilization as expressed 130e7d50326SJeff Roberson * by the ratio of ticks to the tick total. NHALF priorities at the start 131e7d50326SJeff Roberson * and end of the MIN to MAX timeshare range are only reachable with negative 132e7d50326SJeff Roberson * or positive nice respectively. 133e7d50326SJeff Roberson * 134e7d50326SJeff Roberson * PRI_RANGE: Priority range for utilization dependent priorities. 135e7d50326SJeff Roberson * PRI_NRESV: Number of nice values. 136e7d50326SJeff Roberson * PRI_TICKS: Compute a priority in PRI_RANGE from the ticks count and total. 137e7d50326SJeff Roberson * PRI_NICE: Determines the part of the priority inherited from nice. 138e7d50326SJeff Roberson */ 139e7d50326SJeff Roberson #define SCHED_PRI_NRESV (PRIO_MAX - PRIO_MIN) 140e7d50326SJeff Roberson #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2) 141e7d50326SJeff Roberson #define SCHED_PRI_MIN (PRI_MIN_TIMESHARE + SCHED_PRI_NHALF) 142e7d50326SJeff Roberson #define SCHED_PRI_MAX (PRI_MAX_TIMESHARE - SCHED_PRI_NHALF) 143dda713dfSJeff Roberson #define SCHED_PRI_RANGE (SCHED_PRI_MAX - SCHED_PRI_MIN) 144e7d50326SJeff Roberson #define SCHED_PRI_TICKS(ts) \ 145e7d50326SJeff Roberson (SCHED_TICK_HZ((ts)) / \ 1461e516cf5SJeff Roberson (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE)) 147e7d50326SJeff Roberson #define SCHED_PRI_NICE(nice) (nice) 148e7d50326SJeff Roberson 149e7d50326SJeff Roberson /* 150e7d50326SJeff Roberson * These determine the interactivity of a process. Interactivity differs from 151e7d50326SJeff Roberson * cpu utilization in that it expresses the voluntary time slept vs time ran 152e7d50326SJeff Roberson * while cpu utilization includes all time not running. This more accurately 153e7d50326SJeff Roberson * models the intent of the thread. 
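 *
 * As a rough illustration (an assumed sketch of the shape of the score,
 * not the exact computation performed by sched_interact_score()):
 *
 *	if (sleep > run)
 *		score = run * SCHED_INTERACT_HALF / sleep;		(0 .. 50)
 *	else
 *		score = SCHED_INTERACT_MAX -
 *		    sleep * SCHED_INTERACT_HALF / run;			(50 .. 100)
 *
 * so a thread that voluntarily sleeps four ticks for every tick it runs
 * scores about 12, comfortably under SCHED_INTERACT_THRESH, while a pure
 * cpu hog tends toward SCHED_INTERACT_MAX.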
15435e6168fSJeff Roberson * 155407b0157SJeff Roberson * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate 156407b0157SJeff Roberson * before throttling back. 157d322132cSJeff Roberson * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time. 158210491d3SJeff Roberson * INTERACT_MAX: Maximum interactivity value. Smaller is better. 159e1f89c22SJeff Roberson * INTERACT_THRESH: Threshold for placement on the current runq. 16035e6168fSJeff Roberson */ 161e7d50326SJeff Roberson #define SCHED_SLP_RUN_MAX ((hz * 5) << SCHED_TICK_SHIFT) 162e7d50326SJeff Roberson #define SCHED_SLP_RUN_FORK ((hz / 2) << SCHED_TICK_SHIFT) 163210491d3SJeff Roberson #define SCHED_INTERACT_MAX (100) 164210491d3SJeff Roberson #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2) 1654c9612c6SJeff Roberson #define SCHED_INTERACT_THRESH (30) 166e1f89c22SJeff Roberson 16735e6168fSJeff Roberson /* 168e7d50326SJeff Roberson * tickincr: Converts a stathz tick into a hz domain scaled by 169e7d50326SJeff Roberson * the shift factor. Without the shift the error rate 170e7d50326SJeff Roberson * due to rounding would be unacceptably high. 171e7d50326SJeff Roberson * realstathz: stathz is sometimes 0 and run off of hz. 172e7d50326SJeff Roberson * sched_slice: Runtime of each thread before rescheduling. 173ae7a6b38SJeff Roberson * preempt_thresh: Priority threshold for preemption and remote IPIs. 17435e6168fSJeff Roberson */ 175e7d50326SJeff Roberson static int sched_interact = SCHED_INTERACT_THRESH; 176e7d50326SJeff Roberson static int realstathz; 177e7d50326SJeff Roberson static int tickincr; 17873daf66fSJeff Roberson static int sched_slice = 1; 17902e2d6b4SJeff Roberson #ifdef PREEMPTION 18002e2d6b4SJeff Roberson #ifdef FULL_PREEMPTION 18102e2d6b4SJeff Roberson static int preempt_thresh = PRI_MAX_IDLE; 18202e2d6b4SJeff Roberson #else 183ae7a6b38SJeff Roberson static int preempt_thresh = PRI_MIN_KERN; 18402e2d6b4SJeff Roberson #endif 18502e2d6b4SJeff Roberson #else 18602e2d6b4SJeff Roberson static int preempt_thresh = 0; 18702e2d6b4SJeff Roberson #endif 188c5aa6b58SJeff Roberson static int static_boost = 1; 189ae7a6b38SJeff Roberson 19035e6168fSJeff Roberson /* 191ae7a6b38SJeff Roberson * tdq - per processor runqs and statistics. All fields are protected by the 192ae7a6b38SJeff Roberson * tdq_lock. The load and lowpri may be accessed without the lock to avoid excess 193ae7a6b38SJeff Roberson * locking in sched_pickcpu(). 19435e6168fSJeff Roberson */ 195ad1e7d28SJulian Elischer struct tdq { 19673daf66fSJeff Roberson /* Ordered to improve efficiency of cpu_search() and switch(). */ 19762fa74d9SJeff Roberson struct mtx tdq_lock; /* run queue lock. */ 19873daf66fSJeff Roberson struct cpu_group *tdq_cg; /* Pointer to cpu topology. */ 19973daf66fSJeff Roberson int tdq_load; /* Aggregate load. */ 20073daf66fSJeff Roberson int tdq_sysload; /* For loadavg, !ITHD load. */ 20173daf66fSJeff Roberson int tdq_transferable; /* Transferable thread count. */ 20273daf66fSJeff Roberson u_char tdq_lowpri; /* Lowest priority thread. */ 20373daf66fSJeff Roberson u_char tdq_ipipending; /* IPI pending. */ 20473daf66fSJeff Roberson u_char tdq_idx; /* Current insert index. */ 20573daf66fSJeff Roberson u_char tdq_ridx; /* Current removal index. */ 206e7d50326SJeff Roberson struct runq tdq_realtime; /* real-time run queue. */ 207ae7a6b38SJeff Roberson struct runq tdq_timeshare; /* timeshare run queue. */ 208ae7a6b38SJeff Roberson struct runq tdq_idle; /* Queue of IDLE threads.
*/ 20962fa74d9SJeff Roberson char tdq_name[sizeof("sched lock") + 6]; 210ae7a6b38SJeff Roberson } __aligned(64); 21135e6168fSJeff Roberson 2127b8bfa0dSJeff Roberson 21380f86c9fSJeff Roberson #ifdef SMP 21462fa74d9SJeff Roberson struct cpu_group *cpu_top; 2157b8bfa0dSJeff Roberson 21662fa74d9SJeff Roberson #define SCHED_AFFINITY_DEFAULT (max(1, hz / 1000)) 21762fa74d9SJeff Roberson #define SCHED_AFFINITY(ts, t) ((ts)->ts_rltick > ticks - ((t) * affinity)) 2187b8bfa0dSJeff Roberson 2197b8bfa0dSJeff Roberson /* 2207b8bfa0dSJeff Roberson * Run-time tunables. 2217b8bfa0dSJeff Roberson */ 22228994a58SJeff Roberson static int rebalance = 1; 2237fcf154aSJeff Roberson static int balance_interval = 128; /* Default set in sched_initticks(). */ 2247b8bfa0dSJeff Roberson static int affinity; 2257fcf154aSJeff Roberson static int steal_htt = 1; 22628994a58SJeff Roberson static int steal_idle = 1; 22728994a58SJeff Roberson static int steal_thresh = 2; 22880f86c9fSJeff Roberson 22935e6168fSJeff Roberson /* 230d2ad694cSJeff Roberson * One thread queue per processor. 23135e6168fSJeff Roberson */ 232ad1e7d28SJulian Elischer static struct tdq tdq_cpu[MAXCPU]; 2337fcf154aSJeff Roberson static struct tdq *balance_tdq; 2347fcf154aSJeff Roberson static int balance_ticks; 235dc03363dSJeff Roberson 236ad1e7d28SJulian Elischer #define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)]) 237ad1e7d28SJulian Elischer #define TDQ_CPU(x) (&tdq_cpu[(x)]) 238c47f202bSJeff Roberson #define TDQ_ID(x) ((int)((x) - tdq_cpu)) 23980f86c9fSJeff Roberson #else /* !SMP */ 240ad1e7d28SJulian Elischer static struct tdq tdq_cpu; 241dc03363dSJeff Roberson 24236b36916SJeff Roberson #define TDQ_ID(x) (0) 243ad1e7d28SJulian Elischer #define TDQ_SELF() (&tdq_cpu) 244ad1e7d28SJulian Elischer #define TDQ_CPU(x) (&tdq_cpu) 2450a016a05SJeff Roberson #endif 24635e6168fSJeff Roberson 247ae7a6b38SJeff Roberson #define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type)) 248ae7a6b38SJeff Roberson #define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t))) 249ae7a6b38SJeff Roberson #define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f)) 250ae7a6b38SJeff Roberson #define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t))) 25162fa74d9SJeff Roberson #define TDQ_LOCKPTR(t) (&(t)->tdq_lock) 252ae7a6b38SJeff Roberson 2538460a577SJohn Birrell static void sched_priority(struct thread *); 25421381d1bSJeff Roberson static void sched_thread_priority(struct thread *, u_char); 2558460a577SJohn Birrell static int sched_interact_score(struct thread *); 2568460a577SJohn Birrell static void sched_interact_update(struct thread *); 2578460a577SJohn Birrell static void sched_interact_fork(struct thread *); 258ad1e7d28SJulian Elischer static void sched_pctcpu_update(struct td_sched *); 25935e6168fSJeff Roberson 2605d7ef00cSJeff Roberson /* Operations on per processor queues */ 261ad1e7d28SJulian Elischer static struct td_sched * tdq_choose(struct tdq *); 262ad1e7d28SJulian Elischer static void tdq_setup(struct tdq *); 263ad1e7d28SJulian Elischer static void tdq_load_add(struct tdq *, struct td_sched *); 264ad1e7d28SJulian Elischer static void tdq_load_rem(struct tdq *, struct td_sched *); 265ad1e7d28SJulian Elischer static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int); 266ad1e7d28SJulian Elischer static __inline void tdq_runq_rem(struct tdq *, struct td_sched *); 267ff256d9cSJeff Roberson static inline int sched_shouldpreempt(int, int, int); 268ad1e7d28SJulian Elischer void tdq_print(int cpu); 269e7d50326SJeff Roberson static void runq_print(struct 
runq *rq); 270ae7a6b38SJeff Roberson static void tdq_add(struct tdq *, struct thread *, int); 2715d7ef00cSJeff Roberson #ifdef SMP 27262fa74d9SJeff Roberson static int tdq_move(struct tdq *, struct tdq *); 273ad1e7d28SJulian Elischer static int tdq_idled(struct tdq *); 274ff256d9cSJeff Roberson static void tdq_notify(struct tdq *, struct td_sched *); 27562fa74d9SJeff Roberson static struct td_sched *tdq_steal(struct tdq *, int); 27662fa74d9SJeff Roberson static struct td_sched *runq_steal(struct runq *, int); 277ae7a6b38SJeff Roberson static int sched_pickcpu(struct td_sched *, int); 2787fcf154aSJeff Roberson static void sched_balance(void); 27962fa74d9SJeff Roberson static int sched_balance_pair(struct tdq *, struct tdq *); 280ae7a6b38SJeff Roberson static inline struct tdq *sched_setcpu(struct td_sched *, int, int); 281ae7a6b38SJeff Roberson static inline struct mtx *thread_block_switch(struct thread *); 282ae7a6b38SJeff Roberson static inline void thread_unblock_switch(struct thread *, struct mtx *); 283c47f202bSJeff Roberson static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int); 2845d7ef00cSJeff Roberson #endif 2855d7ef00cSJeff Roberson 286e7d50326SJeff Roberson static void sched_setup(void *dummy); 287237fdd78SRobert Watson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL); 288e7d50326SJeff Roberson 289e7d50326SJeff Roberson static void sched_initticks(void *dummy); 290237fdd78SRobert Watson SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, 291237fdd78SRobert Watson NULL); 292e7d50326SJeff Roberson 293ae7a6b38SJeff Roberson /* 294ae7a6b38SJeff Roberson * Print the threads waiting on a run-queue. 295ae7a6b38SJeff Roberson */ 296e7d50326SJeff Roberson static void 297e7d50326SJeff Roberson runq_print(struct runq *rq) 298e7d50326SJeff Roberson { 299e7d50326SJeff Roberson struct rqhead *rqh; 300e7d50326SJeff Roberson struct td_sched *ts; 301e7d50326SJeff Roberson int pri; 302e7d50326SJeff Roberson int j; 303e7d50326SJeff Roberson int i; 304e7d50326SJeff Roberson 305e7d50326SJeff Roberson for (i = 0; i < RQB_LEN; i++) { 306e7d50326SJeff Roberson printf("\t\trunq bits %d 0x%zx\n", 307e7d50326SJeff Roberson i, rq->rq_status.rqb_bits[i]); 308e7d50326SJeff Roberson for (j = 0; j < RQB_BPW; j++) 309e7d50326SJeff Roberson if (rq->rq_status.rqb_bits[i] & (1ul << j)) { 310e7d50326SJeff Roberson pri = j + (i << RQB_L2BPW); 311e7d50326SJeff Roberson rqh = &rq->rq_queues[pri]; 312e7d50326SJeff Roberson TAILQ_FOREACH(ts, rqh, ts_procq) { 313e7d50326SJeff Roberson printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n", 314431f8906SJulian Elischer ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri); 315e7d50326SJeff Roberson } 316e7d50326SJeff Roberson } 317e7d50326SJeff Roberson } 318e7d50326SJeff Roberson } 319e7d50326SJeff Roberson 320ae7a6b38SJeff Roberson /* 321ae7a6b38SJeff Roberson * Print the status of a per-cpu thread queue. Should be a ddb show cmd. 
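 *
 * A note on the index math these print routines decode: runq_print() above
 * locates each non-empty run queue by a word index i and a bit index j in
 * the status bitmap and recovers the queue number as pri = j + (i << RQB_L2BPW).
 * For example, assuming 32-bit run-queue words (RQB_BPW == 32, RQB_L2BPW == 5),
 * a set bit j == 5 in word i == 1 names run queue 5 + (1 << 5) == 37.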
322ae7a6b38SJeff Roberson */ 32315dc847eSJeff Roberson void 324ad1e7d28SJulian Elischer tdq_print(int cpu) 32515dc847eSJeff Roberson { 326ad1e7d28SJulian Elischer struct tdq *tdq; 32715dc847eSJeff Roberson 328ad1e7d28SJulian Elischer tdq = TDQ_CPU(cpu); 32915dc847eSJeff Roberson 330c47f202bSJeff Roberson printf("tdq %d:\n", TDQ_ID(tdq)); 33162fa74d9SJeff Roberson printf("\tlock %p\n", TDQ_LOCKPTR(tdq)); 33262fa74d9SJeff Roberson printf("\tLock name: %s\n", tdq->tdq_name); 333d2ad694cSJeff Roberson printf("\tload: %d\n", tdq->tdq_load); 334e7d50326SJeff Roberson printf("\ttimeshare idx: %d\n", tdq->tdq_idx); 3353f872f85SJeff Roberson printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx); 336e7d50326SJeff Roberson printf("\trealtime runq:\n"); 337e7d50326SJeff Roberson runq_print(&tdq->tdq_realtime); 338e7d50326SJeff Roberson printf("\ttimeshare runq:\n"); 339e7d50326SJeff Roberson runq_print(&tdq->tdq_timeshare); 340e7d50326SJeff Roberson printf("\tidle runq:\n"); 341e7d50326SJeff Roberson runq_print(&tdq->tdq_idle); 342d2ad694cSJeff Roberson printf("\tload transferable: %d\n", tdq->tdq_transferable); 343ae7a6b38SJeff Roberson printf("\tlowest priority: %d\n", tdq->tdq_lowpri); 34415dc847eSJeff Roberson } 34515dc847eSJeff Roberson 346ff256d9cSJeff Roberson static inline int 347ff256d9cSJeff Roberson sched_shouldpreempt(int pri, int cpri, int remote) 348ff256d9cSJeff Roberson { 349ff256d9cSJeff Roberson /* 350ff256d9cSJeff Roberson * If the new priority is not better than the current priority there is 351ff256d9cSJeff Roberson * nothing to do. 352ff256d9cSJeff Roberson */ 353ff256d9cSJeff Roberson if (pri >= cpri) 354ff256d9cSJeff Roberson return (0); 355ff256d9cSJeff Roberson /* 356ff256d9cSJeff Roberson * Always preempt idle. 357ff256d9cSJeff Roberson */ 358ff256d9cSJeff Roberson if (cpri >= PRI_MIN_IDLE) 359ff256d9cSJeff Roberson return (1); 360ff256d9cSJeff Roberson /* 361ff256d9cSJeff Roberson * If preemption is disabled don't preempt others. 362ff256d9cSJeff Roberson */ 363ff256d9cSJeff Roberson if (preempt_thresh == 0) 364ff256d9cSJeff Roberson return (0); 365ff256d9cSJeff Roberson /* 366ff256d9cSJeff Roberson * Preempt if we exceed the threshold. 367ff256d9cSJeff Roberson */ 368ff256d9cSJeff Roberson if (pri <= preempt_thresh) 369ff256d9cSJeff Roberson return (1); 370ff256d9cSJeff Roberson /* 371ff256d9cSJeff Roberson * If we're realtime or better and there is timeshare or worse running 372ff256d9cSJeff Roberson * preempt only remote processors. 373ff256d9cSJeff Roberson */ 374ff256d9cSJeff Roberson if (remote && pri <= PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME) 375ff256d9cSJeff Roberson return (1); 376ff256d9cSJeff Roberson return (0); 377ff256d9cSJeff Roberson } 378ff256d9cSJeff Roberson 379ae7a6b38SJeff Roberson #define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS) 380ae7a6b38SJeff Roberson /* 381ae7a6b38SJeff Roberson * Add a thread to the actual run-queue. Keeps transferable counts up to 382ae7a6b38SJeff Roberson * date with what is actually on the run-queue. Selects the correct 383ae7a6b38SJeff Roberson * queue position for timeshare threads. 
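 *
 * A worked example of the timeshare placement done below (the priority
 * values are assumptions for illustration): with PRI_MIN_TIMESHARE == 160,
 * PRI_MAX_TIMESHARE == 223 and RQ_NQS == 64, TS_RQ_PPQ works out to 1, so a
 * priority-190 thread maps to offset (190 - 160) / 1 == 30.  If tdq_idx is
 * currently 50, the thread is inserted at queue (30 + 50) % 64 == 16, i.e.
 * 30 queues after the rotating head, so better priorities are always reached
 * sooner as the head advances but nothing is starved.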
384ae7a6b38SJeff Roberson */ 385155b9987SJeff Roberson static __inline void 386ad1e7d28SJulian Elischer tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags) 387155b9987SJeff Roberson { 388c143ac21SJeff Roberson u_char pri; 389c143ac21SJeff Roberson 390ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 391ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 39273daf66fSJeff Roberson 39373daf66fSJeff Roberson TD_SET_RUNQ(ts->ts_thread); 394e7d50326SJeff Roberson if (THREAD_CAN_MIGRATE(ts->ts_thread)) { 395d2ad694cSJeff Roberson tdq->tdq_transferable++; 396ad1e7d28SJulian Elischer ts->ts_flags |= TSF_XFERABLE; 39780f86c9fSJeff Roberson } 398e7d50326SJeff Roberson pri = ts->ts_thread->td_priority; 399c143ac21SJeff Roberson if (pri <= PRI_MAX_REALTIME) { 400c143ac21SJeff Roberson ts->ts_runq = &tdq->tdq_realtime; 401c143ac21SJeff Roberson } else if (pri <= PRI_MAX_TIMESHARE) { 402c143ac21SJeff Roberson ts->ts_runq = &tdq->tdq_timeshare; 403e7d50326SJeff Roberson KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE, 404e7d50326SJeff Roberson ("Invalid priority %d on timeshare runq", pri)); 405e7d50326SJeff Roberson /* 406e7d50326SJeff Roberson * This queue contains only priorities between MIN and MAX 407e7d50326SJeff Roberson * realtime. Use the whole queue to represent these values. 408e7d50326SJeff Roberson */ 409c47f202bSJeff Roberson if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) { 410e7d50326SJeff Roberson pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ; 411e7d50326SJeff Roberson pri = (pri + tdq->tdq_idx) % RQ_NQS; 4123f872f85SJeff Roberson /* 4133f872f85SJeff Roberson * This effectively shortens the queue by one so we 4143f872f85SJeff Roberson * can have a one slot difference between idx and 4153f872f85SJeff Roberson * ridx while we wait for threads to drain. 4163f872f85SJeff Roberson */ 4173f872f85SJeff Roberson if (tdq->tdq_ridx != tdq->tdq_idx && 4183f872f85SJeff Roberson pri == tdq->tdq_ridx) 4194499aff6SJeff Roberson pri = (unsigned char)(pri - 1) % RQ_NQS; 420e7d50326SJeff Roberson } else 4213f872f85SJeff Roberson pri = tdq->tdq_ridx; 422e7d50326SJeff Roberson runq_add_pri(ts->ts_runq, ts, pri, flags); 423c143ac21SJeff Roberson return; 424e7d50326SJeff Roberson } else 42573daf66fSJeff Roberson ts->ts_runq = &tdq->tdq_idle; 426c143ac21SJeff Roberson runq_add(ts->ts_runq, ts, flags); 42773daf66fSJeff Roberson } 42873daf66fSJeff Roberson 42973daf66fSJeff Roberson /* 430ae7a6b38SJeff Roberson * Remove a thread from a run-queue. This typically happens when a thread 431ae7a6b38SJeff Roberson * is selected to run. Running threads are not on the queue and the 432ae7a6b38SJeff Roberson * transferable count does not reflect them. 
433ae7a6b38SJeff Roberson */ 434155b9987SJeff Roberson static __inline void 435ad1e7d28SJulian Elischer tdq_runq_rem(struct tdq *tdq, struct td_sched *ts) 436155b9987SJeff Roberson { 437ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 438ae7a6b38SJeff Roberson KASSERT(ts->ts_runq != NULL, 439ae7a6b38SJeff Roberson ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread)); 440ad1e7d28SJulian Elischer if (ts->ts_flags & TSF_XFERABLE) { 441d2ad694cSJeff Roberson tdq->tdq_transferable--; 442ad1e7d28SJulian Elischer ts->ts_flags &= ~TSF_XFERABLE; 44380f86c9fSJeff Roberson } 4443f872f85SJeff Roberson if (ts->ts_runq == &tdq->tdq_timeshare) { 4453f872f85SJeff Roberson if (tdq->tdq_idx != tdq->tdq_ridx) 4463f872f85SJeff Roberson runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx); 447e7d50326SJeff Roberson else 4483f872f85SJeff Roberson runq_remove_idx(ts->ts_runq, ts, NULL); 4493f872f85SJeff Roberson } else 450ad1e7d28SJulian Elischer runq_remove(ts->ts_runq, ts); 451155b9987SJeff Roberson } 452155b9987SJeff Roberson 453ae7a6b38SJeff Roberson /* 454ae7a6b38SJeff Roberson * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load 455ae7a6b38SJeff Roberson * for this thread to the referenced thread queue. 456ae7a6b38SJeff Roberson */ 457a8949de2SJeff Roberson static void 458ad1e7d28SJulian Elischer tdq_load_add(struct tdq *tdq, struct td_sched *ts) 4595d7ef00cSJeff Roberson { 460ef1134c9SJeff Roberson int class; 461ae7a6b38SJeff Roberson 462ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 463ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 464ad1e7d28SJulian Elischer class = PRI_BASE(ts->ts_thread->td_pri_class); 465d2ad694cSJeff Roberson tdq->tdq_load++; 466c47f202bSJeff Roberson CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load); 4677b8bfa0dSJeff Roberson if (class != PRI_ITHD && 4687b8bfa0dSJeff Roberson (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 469d2ad694cSJeff Roberson tdq->tdq_sysload++; 4705d7ef00cSJeff Roberson } 47115dc847eSJeff Roberson 472ae7a6b38SJeff Roberson /* 473ae7a6b38SJeff Roberson * Remove the load from a thread that is transitioning to a sleep state or 474ae7a6b38SJeff Roberson * exiting. 475ae7a6b38SJeff Roberson */ 476a8949de2SJeff Roberson static void 477ad1e7d28SJulian Elischer tdq_load_rem(struct tdq *tdq, struct td_sched *ts) 4785d7ef00cSJeff Roberson { 479ef1134c9SJeff Roberson int class; 480ae7a6b38SJeff Roberson 481ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 482ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 483ad1e7d28SJulian Elischer class = PRI_BASE(ts->ts_thread->td_pri_class); 4847b8bfa0dSJeff Roberson if (class != PRI_ITHD && 4857b8bfa0dSJeff Roberson (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 486d2ad694cSJeff Roberson tdq->tdq_sysload--; 487ae7a6b38SJeff Roberson KASSERT(tdq->tdq_load != 0, 488c47f202bSJeff Roberson ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq))); 489d2ad694cSJeff Roberson tdq->tdq_load--; 490d2ad694cSJeff Roberson CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); 49115dc847eSJeff Roberson } 49215dc847eSJeff Roberson 493356500a3SJeff Roberson /* 49462fa74d9SJeff Roberson * Set lowpri to its exact value by searching the run-queue and 49562fa74d9SJeff Roberson * evaluating curthread. curthread may be passed as an optimization. 
496356500a3SJeff Roberson */ 49722bf7d9aSJeff Roberson static void 49862fa74d9SJeff Roberson tdq_setlowpri(struct tdq *tdq, struct thread *ctd) 49962fa74d9SJeff Roberson { 50062fa74d9SJeff Roberson struct td_sched *ts; 50162fa74d9SJeff Roberson struct thread *td; 50262fa74d9SJeff Roberson 50362fa74d9SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 50462fa74d9SJeff Roberson if (ctd == NULL) 50562fa74d9SJeff Roberson ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread; 50662fa74d9SJeff Roberson ts = tdq_choose(tdq); 50762fa74d9SJeff Roberson if (ts) 50862fa74d9SJeff Roberson td = ts->ts_thread; 50962fa74d9SJeff Roberson if (ts == NULL || td->td_priority > ctd->td_priority) 51062fa74d9SJeff Roberson tdq->tdq_lowpri = ctd->td_priority; 51162fa74d9SJeff Roberson else 51262fa74d9SJeff Roberson tdq->tdq_lowpri = td->td_priority; 51362fa74d9SJeff Roberson } 51462fa74d9SJeff Roberson 51562fa74d9SJeff Roberson #ifdef SMP 51662fa74d9SJeff Roberson struct cpu_search { 51762fa74d9SJeff Roberson cpumask_t cs_mask; /* Mask of valid cpus. */ 51862fa74d9SJeff Roberson u_int cs_load; 51962fa74d9SJeff Roberson u_int cs_cpu; 52062fa74d9SJeff Roberson int cs_limit; /* Min priority for low min load for high. */ 52162fa74d9SJeff Roberson }; 52262fa74d9SJeff Roberson 52362fa74d9SJeff Roberson #define CPU_SEARCH_LOWEST 0x1 52462fa74d9SJeff Roberson #define CPU_SEARCH_HIGHEST 0x2 52562fa74d9SJeff Roberson #define CPU_SEARCH_BOTH (CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST) 52662fa74d9SJeff Roberson 52762fa74d9SJeff Roberson #define CPUMASK_FOREACH(cpu, mask) \ 52862fa74d9SJeff Roberson for ((cpu) = 0; (cpu) < sizeof((mask)) * 8; (cpu)++) \ 52962fa74d9SJeff Roberson if ((mask) & 1 << (cpu)) 53062fa74d9SJeff Roberson 531d628fbfaSJohn Baldwin static __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low, 53262fa74d9SJeff Roberson struct cpu_search *high, const int match); 53362fa74d9SJeff Roberson int cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low); 53462fa74d9SJeff Roberson int cpu_search_highest(struct cpu_group *cg, struct cpu_search *high); 53562fa74d9SJeff Roberson int cpu_search_both(struct cpu_group *cg, struct cpu_search *low, 53662fa74d9SJeff Roberson struct cpu_search *high); 53762fa74d9SJeff Roberson 53862fa74d9SJeff Roberson /* 53962fa74d9SJeff Roberson * This routine compares according to the match argument and should be 54062fa74d9SJeff Roberson * reduced in actual instantiations via constant propagation and dead code 54162fa74d9SJeff Roberson * elimination. 
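 *
 * For instance, when called from cpu_search_lowest() the match argument is
 * the compile-time constant CPU_SEARCH_LOWEST, so the HIGHEST branch is
 * statically dead and the instantiation is expected to reduce to roughly
 * the following (illustrative, not literal compiler output):
 *
 *	tdq = TDQ_CPU(cpu);
 *	if (low->cs_mask & (1 << cpu) &&
 *	    tdq->tdq_load < low->cs_load &&
 *	    tdq->tdq_lowpri > low->cs_limit) {
 *		low->cs_cpu = cpu;
 *		low->cs_load = tdq->tdq_load;
 *	}
 *	return (tdq->tdq_load);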
54262fa74d9SJeff Roberson */ 54362fa74d9SJeff Roberson static __inline int 54462fa74d9SJeff Roberson cpu_compare(int cpu, struct cpu_search *low, struct cpu_search *high, 54562fa74d9SJeff Roberson const int match) 54662fa74d9SJeff Roberson { 54762fa74d9SJeff Roberson struct tdq *tdq; 54862fa74d9SJeff Roberson 54962fa74d9SJeff Roberson tdq = TDQ_CPU(cpu); 55062fa74d9SJeff Roberson if (match & CPU_SEARCH_LOWEST) 55162fa74d9SJeff Roberson if (low->cs_mask & (1 << cpu) && 55262fa74d9SJeff Roberson tdq->tdq_load < low->cs_load && 55362fa74d9SJeff Roberson tdq->tdq_lowpri > low->cs_limit) { 55462fa74d9SJeff Roberson low->cs_cpu = cpu; 55562fa74d9SJeff Roberson low->cs_load = tdq->tdq_load; 55662fa74d9SJeff Roberson } 55762fa74d9SJeff Roberson if (match & CPU_SEARCH_HIGHEST) 55862fa74d9SJeff Roberson if (high->cs_mask & (1 << cpu) && 55962fa74d9SJeff Roberson tdq->tdq_load >= high->cs_limit && 56062fa74d9SJeff Roberson tdq->tdq_load > high->cs_load && 56162fa74d9SJeff Roberson tdq->tdq_transferable) { 56262fa74d9SJeff Roberson high->cs_cpu = cpu; 56362fa74d9SJeff Roberson high->cs_load = tdq->tdq_load; 56462fa74d9SJeff Roberson } 56562fa74d9SJeff Roberson return (tdq->tdq_load); 56662fa74d9SJeff Roberson } 56762fa74d9SJeff Roberson 56862fa74d9SJeff Roberson /* 56962fa74d9SJeff Roberson * Search the tree of cpu_groups for the lowest or highest loaded cpu 57062fa74d9SJeff Roberson * according to the match argument. This routine actually compares the 57162fa74d9SJeff Roberson * load on all paths through the tree and finds the least loaded cpu on 57262fa74d9SJeff Roberson * the least loaded path, which may differ from the least loaded cpu in 57362fa74d9SJeff Roberson * the system. This balances work among caches and busses. 57462fa74d9SJeff Roberson * 57562fa74d9SJeff Roberson * This inline is instantiated in three forms below using constants for the 57662fa74d9SJeff Roberson * match argument. It is reduced to the minimum set for each case. It is 57762fa74d9SJeff Roberson * also recursive to the depth of the tree. 
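 *
 * A small worked example with made-up loads: consider two child groups,
 * one whose cpus carry loads {0, 9} (group total 9) and one whose cpus
 * carry loads {2, 3} (group total 5).  A CPU_SEARCH_LOWEST pass keeps the
 * result from the child with the smaller total, so it returns the cpu with
 * load 2 even though a cpu with load 0 exists under the busier group; that
 * is the "least loaded cpu on the least loaded path" trade-off described
 * above.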
57862fa74d9SJeff Roberson */ 579d628fbfaSJohn Baldwin static __inline int 58062fa74d9SJeff Roberson cpu_search(struct cpu_group *cg, struct cpu_search *low, 58162fa74d9SJeff Roberson struct cpu_search *high, const int match) 58262fa74d9SJeff Roberson { 58362fa74d9SJeff Roberson int total; 58462fa74d9SJeff Roberson 58562fa74d9SJeff Roberson total = 0; 58662fa74d9SJeff Roberson if (cg->cg_children) { 58762fa74d9SJeff Roberson struct cpu_search lgroup; 58862fa74d9SJeff Roberson struct cpu_search hgroup; 58962fa74d9SJeff Roberson struct cpu_group *child; 59062fa74d9SJeff Roberson u_int lload; 59162fa74d9SJeff Roberson int hload; 59262fa74d9SJeff Roberson int load; 59362fa74d9SJeff Roberson int i; 59462fa74d9SJeff Roberson 59562fa74d9SJeff Roberson lload = -1; 59662fa74d9SJeff Roberson hload = -1; 59762fa74d9SJeff Roberson for (i = 0; i < cg->cg_children; i++) { 59862fa74d9SJeff Roberson child = &cg->cg_child[i]; 59962fa74d9SJeff Roberson if (match & CPU_SEARCH_LOWEST) { 60062fa74d9SJeff Roberson lgroup = *low; 60162fa74d9SJeff Roberson lgroup.cs_load = -1; 60262fa74d9SJeff Roberson } 60362fa74d9SJeff Roberson if (match & CPU_SEARCH_HIGHEST) { 60462fa74d9SJeff Roberson hgroup = *high; 60562fa74d9SJeff Roberson lgroup.cs_load = 0; 60662fa74d9SJeff Roberson } 60762fa74d9SJeff Roberson switch (match) { 60862fa74d9SJeff Roberson case CPU_SEARCH_LOWEST: 60962fa74d9SJeff Roberson load = cpu_search_lowest(child, &lgroup); 61062fa74d9SJeff Roberson break; 61162fa74d9SJeff Roberson case CPU_SEARCH_HIGHEST: 61262fa74d9SJeff Roberson load = cpu_search_highest(child, &hgroup); 61362fa74d9SJeff Roberson break; 61462fa74d9SJeff Roberson case CPU_SEARCH_BOTH: 61562fa74d9SJeff Roberson load = cpu_search_both(child, &lgroup, &hgroup); 61662fa74d9SJeff Roberson break; 61762fa74d9SJeff Roberson } 61862fa74d9SJeff Roberson total += load; 61962fa74d9SJeff Roberson if (match & CPU_SEARCH_LOWEST) 62062fa74d9SJeff Roberson if (load < lload || low->cs_cpu == -1) { 62162fa74d9SJeff Roberson *low = lgroup; 62262fa74d9SJeff Roberson lload = load; 62362fa74d9SJeff Roberson } 62462fa74d9SJeff Roberson if (match & CPU_SEARCH_HIGHEST) 62562fa74d9SJeff Roberson if (load > hload || high->cs_cpu == -1) { 62662fa74d9SJeff Roberson hload = load; 62762fa74d9SJeff Roberson *high = hgroup; 62862fa74d9SJeff Roberson } 62962fa74d9SJeff Roberson } 63062fa74d9SJeff Roberson } else { 63162fa74d9SJeff Roberson int cpu; 63262fa74d9SJeff Roberson 63362fa74d9SJeff Roberson CPUMASK_FOREACH(cpu, cg->cg_mask) 63462fa74d9SJeff Roberson total += cpu_compare(cpu, low, high, match); 63562fa74d9SJeff Roberson } 63662fa74d9SJeff Roberson return (total); 63762fa74d9SJeff Roberson } 63862fa74d9SJeff Roberson 63962fa74d9SJeff Roberson /* 64062fa74d9SJeff Roberson * cpu_search instantiations must pass constants to maintain the inline 64162fa74d9SJeff Roberson * optimization. 
64262fa74d9SJeff Roberson */ 64362fa74d9SJeff Roberson int 64462fa74d9SJeff Roberson cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low) 64562fa74d9SJeff Roberson { 64662fa74d9SJeff Roberson return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST); 64762fa74d9SJeff Roberson } 64862fa74d9SJeff Roberson 64962fa74d9SJeff Roberson int 65062fa74d9SJeff Roberson cpu_search_highest(struct cpu_group *cg, struct cpu_search *high) 65162fa74d9SJeff Roberson { 65262fa74d9SJeff Roberson return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST); 65362fa74d9SJeff Roberson } 65462fa74d9SJeff Roberson 65562fa74d9SJeff Roberson int 65662fa74d9SJeff Roberson cpu_search_both(struct cpu_group *cg, struct cpu_search *low, 65762fa74d9SJeff Roberson struct cpu_search *high) 65862fa74d9SJeff Roberson { 65962fa74d9SJeff Roberson return cpu_search(cg, low, high, CPU_SEARCH_BOTH); 66062fa74d9SJeff Roberson } 66162fa74d9SJeff Roberson 66262fa74d9SJeff Roberson /* 66362fa74d9SJeff Roberson * Find the cpu with the least load via the least loaded path that has a 66462fa74d9SJeff Roberson * lowpri greater than pri pri. A pri of -1 indicates any priority is 66562fa74d9SJeff Roberson * acceptable. 66662fa74d9SJeff Roberson */ 66762fa74d9SJeff Roberson static inline int 66862fa74d9SJeff Roberson sched_lowest(struct cpu_group *cg, cpumask_t mask, int pri) 66962fa74d9SJeff Roberson { 67062fa74d9SJeff Roberson struct cpu_search low; 67162fa74d9SJeff Roberson 67262fa74d9SJeff Roberson low.cs_cpu = -1; 67362fa74d9SJeff Roberson low.cs_load = -1; 67462fa74d9SJeff Roberson low.cs_mask = mask; 67562fa74d9SJeff Roberson low.cs_limit = pri; 67662fa74d9SJeff Roberson cpu_search_lowest(cg, &low); 67762fa74d9SJeff Roberson return low.cs_cpu; 67862fa74d9SJeff Roberson } 67962fa74d9SJeff Roberson 68062fa74d9SJeff Roberson /* 68162fa74d9SJeff Roberson * Find the cpu with the highest load via the highest loaded path. 68262fa74d9SJeff Roberson */ 68362fa74d9SJeff Roberson static inline int 68462fa74d9SJeff Roberson sched_highest(struct cpu_group *cg, cpumask_t mask, int minload) 68562fa74d9SJeff Roberson { 68662fa74d9SJeff Roberson struct cpu_search high; 68762fa74d9SJeff Roberson 68862fa74d9SJeff Roberson high.cs_cpu = -1; 68962fa74d9SJeff Roberson high.cs_load = 0; 69062fa74d9SJeff Roberson high.cs_mask = mask; 69162fa74d9SJeff Roberson high.cs_limit = minload; 69262fa74d9SJeff Roberson cpu_search_highest(cg, &high); 69362fa74d9SJeff Roberson return high.cs_cpu; 69462fa74d9SJeff Roberson } 69562fa74d9SJeff Roberson 69662fa74d9SJeff Roberson /* 69762fa74d9SJeff Roberson * Simultaneously find the highest and lowest loaded cpu reachable via 69862fa74d9SJeff Roberson * cg. 
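 *
 * Typical use, as in sched_balance_group() below (sketch):
 *
 *	mask = -1;
 *	sched_both(cg, mask, &low, &high);
 *	if (low != high && low != -1 && high != -1)
 *		sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low));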
69962fa74d9SJeff Roberson */ 70062fa74d9SJeff Roberson static inline void 70162fa74d9SJeff Roberson sched_both(struct cpu_group *cg, cpumask_t mask, int *lowcpu, int *highcpu) 70262fa74d9SJeff Roberson { 70362fa74d9SJeff Roberson struct cpu_search high; 70462fa74d9SJeff Roberson struct cpu_search low; 70562fa74d9SJeff Roberson 70662fa74d9SJeff Roberson low.cs_cpu = -1; 70762fa74d9SJeff Roberson low.cs_limit = -1; 70862fa74d9SJeff Roberson low.cs_load = -1; 70962fa74d9SJeff Roberson low.cs_mask = mask; 71062fa74d9SJeff Roberson high.cs_load = 0; 71162fa74d9SJeff Roberson high.cs_cpu = -1; 71262fa74d9SJeff Roberson high.cs_limit = -1; 71362fa74d9SJeff Roberson high.cs_mask = mask; 71462fa74d9SJeff Roberson cpu_search_both(cg, &low, &high); 71562fa74d9SJeff Roberson *lowcpu = low.cs_cpu; 71662fa74d9SJeff Roberson *highcpu = high.cs_cpu; 71762fa74d9SJeff Roberson return; 71862fa74d9SJeff Roberson } 71962fa74d9SJeff Roberson 72062fa74d9SJeff Roberson static void 72162fa74d9SJeff Roberson sched_balance_group(struct cpu_group *cg) 72262fa74d9SJeff Roberson { 72362fa74d9SJeff Roberson cpumask_t mask; 72462fa74d9SJeff Roberson int high; 72562fa74d9SJeff Roberson int low; 72662fa74d9SJeff Roberson int i; 72762fa74d9SJeff Roberson 72862fa74d9SJeff Roberson mask = -1; 72962fa74d9SJeff Roberson for (;;) { 73062fa74d9SJeff Roberson sched_both(cg, mask, &low, &high); 73162fa74d9SJeff Roberson if (low == high || low == -1 || high == -1) 73262fa74d9SJeff Roberson break; 73362fa74d9SJeff Roberson if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low))) 73462fa74d9SJeff Roberson break; 73562fa74d9SJeff Roberson /* 73662fa74d9SJeff Roberson * If we failed to move any threads determine which cpu 73762fa74d9SJeff Roberson * to kick out of the set and try again. 73862fa74d9SJeff Roberson */ 73962fa74d9SJeff Roberson if (TDQ_CPU(high)->tdq_transferable == 0) 74062fa74d9SJeff Roberson mask &= ~(1 << high); 74162fa74d9SJeff Roberson else 74262fa74d9SJeff Roberson mask &= ~(1 << low); 74362fa74d9SJeff Roberson } 74462fa74d9SJeff Roberson 74562fa74d9SJeff Roberson for (i = 0; i < cg->cg_children; i++) 74662fa74d9SJeff Roberson sched_balance_group(&cg->cg_child[i]); 74762fa74d9SJeff Roberson } 74862fa74d9SJeff Roberson 74962fa74d9SJeff Roberson static void 7507fcf154aSJeff Roberson sched_balance() 751356500a3SJeff Roberson { 7527fcf154aSJeff Roberson struct tdq *tdq; 753356500a3SJeff Roberson 7547fcf154aSJeff Roberson /* 7557fcf154aSJeff Roberson * Select a random time between .5 * balance_interval and 7567fcf154aSJeff Roberson * 1.5 * balance_interval. 7577fcf154aSJeff Roberson */ 7587fcf154aSJeff Roberson balance_ticks = max(balance_interval / 2, 1); 7597fcf154aSJeff Roberson balance_ticks += random() % balance_interval; 760ae7a6b38SJeff Roberson if (smp_started == 0 || rebalance == 0) 761598b368dSJeff Roberson return; 7627fcf154aSJeff Roberson tdq = TDQ_SELF(); 7637fcf154aSJeff Roberson TDQ_UNLOCK(tdq); 76462fa74d9SJeff Roberson sched_balance_group(cpu_top); 7657fcf154aSJeff Roberson TDQ_LOCK(tdq); 766cac77d04SJeff Roberson } 76786f8ae96SJeff Roberson 768ae7a6b38SJeff Roberson /* 769ae7a6b38SJeff Roberson * Lock two thread queues using their address to maintain lock order. 
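 *
 * For example, if tdq_cpu[2] sits at a lower address than tdq_cpu[5], a
 * balance from 2 to 5 and a simultaneous balance from 5 to 2 both acquire
 * the pair as (2, then 5), so two cpus balancing against each other cannot
 * deadlock.  The MTX_DUPOK flag on the second acquisition is what allows
 * two locks of the same "sched lock" class to be held at once without a
 * WITNESS complaint.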
770ae7a6b38SJeff Roberson */ 771ae7a6b38SJeff Roberson static void 772ae7a6b38SJeff Roberson tdq_lock_pair(struct tdq *one, struct tdq *two) 773ae7a6b38SJeff Roberson { 774ae7a6b38SJeff Roberson if (one < two) { 775ae7a6b38SJeff Roberson TDQ_LOCK(one); 776ae7a6b38SJeff Roberson TDQ_LOCK_FLAGS(two, MTX_DUPOK); 777ae7a6b38SJeff Roberson } else { 778ae7a6b38SJeff Roberson TDQ_LOCK(two); 779ae7a6b38SJeff Roberson TDQ_LOCK_FLAGS(one, MTX_DUPOK); 780ae7a6b38SJeff Roberson } 781ae7a6b38SJeff Roberson } 782ae7a6b38SJeff Roberson 783ae7a6b38SJeff Roberson /* 7847fcf154aSJeff Roberson * Unlock two thread queues. Order is not important here. 7857fcf154aSJeff Roberson */ 7867fcf154aSJeff Roberson static void 7877fcf154aSJeff Roberson tdq_unlock_pair(struct tdq *one, struct tdq *two) 7887fcf154aSJeff Roberson { 7897fcf154aSJeff Roberson TDQ_UNLOCK(one); 7907fcf154aSJeff Roberson TDQ_UNLOCK(two); 7917fcf154aSJeff Roberson } 7927fcf154aSJeff Roberson 7937fcf154aSJeff Roberson /* 794ae7a6b38SJeff Roberson * Transfer load between two imbalanced thread queues. 795ae7a6b38SJeff Roberson */ 79662fa74d9SJeff Roberson static int 797ad1e7d28SJulian Elischer sched_balance_pair(struct tdq *high, struct tdq *low) 798cac77d04SJeff Roberson { 799cac77d04SJeff Roberson int transferable; 800cac77d04SJeff Roberson int high_load; 801cac77d04SJeff Roberson int low_load; 80262fa74d9SJeff Roberson int moved; 803cac77d04SJeff Roberson int move; 804cac77d04SJeff Roberson int diff; 805cac77d04SJeff Roberson int i; 806cac77d04SJeff Roberson 807ae7a6b38SJeff Roberson tdq_lock_pair(high, low); 808d2ad694cSJeff Roberson transferable = high->tdq_transferable; 809d2ad694cSJeff Roberson high_load = high->tdq_load; 810d2ad694cSJeff Roberson low_load = low->tdq_load; 81162fa74d9SJeff Roberson moved = 0; 812155b9987SJeff Roberson /* 813155b9987SJeff Roberson * Determine what the imbalance is and then adjust that to how many 814d2ad694cSJeff Roberson * threads we actually have to give up (transferable). 815155b9987SJeff Roberson */ 816ae7a6b38SJeff Roberson if (transferable != 0) { 817cac77d04SJeff Roberson diff = high_load - low_load; 818356500a3SJeff Roberson move = diff / 2; 819356500a3SJeff Roberson if (diff & 0x1) 820356500a3SJeff Roberson move++; 82180f86c9fSJeff Roberson move = min(move, transferable); 822356500a3SJeff Roberson for (i = 0; i < move; i++) 82362fa74d9SJeff Roberson moved += tdq_move(high, low); 824a5423ea3SJeff Roberson /* 825a5423ea3SJeff Roberson * IPI the target cpu to force it to reschedule with the new 826a5423ea3SJeff Roberson * workload. 827a5423ea3SJeff Roberson */ 828a5423ea3SJeff Roberson ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT); 829ae7a6b38SJeff Roberson } 8307fcf154aSJeff Roberson tdq_unlock_pair(high, low); 83162fa74d9SJeff Roberson return (moved); 832356500a3SJeff Roberson } 833356500a3SJeff Roberson 834ae7a6b38SJeff Roberson /* 835ae7a6b38SJeff Roberson * Move a thread from one thread queue to another. 
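 *
 * sched_balance_pair() above calls this once per thread it wants to shed.
 * As a worked example with assumed loads, queues carrying 7 and 2 threads
 * give diff == 5, so move rounds up to 3 and tdq_move() runs at most three
 * times, capped by the number of transferable threads.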
836ae7a6b38SJeff Roberson */ 83762fa74d9SJeff Roberson static int 838ae7a6b38SJeff Roberson tdq_move(struct tdq *from, struct tdq *to) 839356500a3SJeff Roberson { 840ad1e7d28SJulian Elischer struct td_sched *ts; 841ae7a6b38SJeff Roberson struct thread *td; 842ae7a6b38SJeff Roberson struct tdq *tdq; 843ae7a6b38SJeff Roberson int cpu; 844356500a3SJeff Roberson 8457fcf154aSJeff Roberson TDQ_LOCK_ASSERT(from, MA_OWNED); 8467fcf154aSJeff Roberson TDQ_LOCK_ASSERT(to, MA_OWNED); 8477fcf154aSJeff Roberson 848ad1e7d28SJulian Elischer tdq = from; 849ae7a6b38SJeff Roberson cpu = TDQ_ID(to); 85062fa74d9SJeff Roberson ts = tdq_steal(tdq, cpu); 851ad1e7d28SJulian Elischer if (ts == NULL) 85262fa74d9SJeff Roberson return (0); 853ae7a6b38SJeff Roberson td = ts->ts_thread; 854ae7a6b38SJeff Roberson /* 855ae7a6b38SJeff Roberson * Although the run queue is locked the thread may be blocked. Lock 8567fcf154aSJeff Roberson * it to clear this and acquire the run-queue lock. 857ae7a6b38SJeff Roberson */ 858ae7a6b38SJeff Roberson thread_lock(td); 8597fcf154aSJeff Roberson /* Drop recursive lock on from acquired via thread_lock(). */ 860ae7a6b38SJeff Roberson TDQ_UNLOCK(from); 861ae7a6b38SJeff Roberson sched_rem(td); 8627b8bfa0dSJeff Roberson ts->ts_cpu = cpu; 863ae7a6b38SJeff Roberson td->td_lock = TDQ_LOCKPTR(to); 864ae7a6b38SJeff Roberson tdq_add(to, td, SRQ_YIELDING); 86562fa74d9SJeff Roberson return (1); 866356500a3SJeff Roberson } 86722bf7d9aSJeff Roberson 868ae7a6b38SJeff Roberson /* 869ae7a6b38SJeff Roberson * This tdq has idled. Try to steal a thread from another cpu and switch 870ae7a6b38SJeff Roberson * to it. 871ae7a6b38SJeff Roberson */ 87280f86c9fSJeff Roberson static int 873ad1e7d28SJulian Elischer tdq_idled(struct tdq *tdq) 87422bf7d9aSJeff Roberson { 87562fa74d9SJeff Roberson struct cpu_group *cg; 876ad1e7d28SJulian Elischer struct tdq *steal; 87762fa74d9SJeff Roberson cpumask_t mask; 87862fa74d9SJeff Roberson int thresh; 879ae7a6b38SJeff Roberson int cpu; 88080f86c9fSJeff Roberson 88188f530ccSJeff Roberson if (smp_started == 0 || steal_idle == 0) 88288f530ccSJeff Roberson return (1); 88362fa74d9SJeff Roberson mask = -1; 88462fa74d9SJeff Roberson mask &= ~PCPU_GET(cpumask); 88562fa74d9SJeff Roberson /* We don't want to be preempted while we're iterating. */ 886ae7a6b38SJeff Roberson spinlock_enter(); 88762fa74d9SJeff Roberson for (cg = tdq->tdq_cg; cg != NULL; ) { 88862fa74d9SJeff Roberson if ((cg->cg_flags & (CG_FLAG_HTT | CG_FLAG_THREAD)) == 0) 88962fa74d9SJeff Roberson thresh = steal_thresh; 89062fa74d9SJeff Roberson else 89162fa74d9SJeff Roberson thresh = 1; 89262fa74d9SJeff Roberson cpu = sched_highest(cg, mask, thresh); 89362fa74d9SJeff Roberson if (cpu == -1) { 89462fa74d9SJeff Roberson cg = cg->cg_parent; 89580f86c9fSJeff Roberson continue; 8967b8bfa0dSJeff Roberson } 8977b8bfa0dSJeff Roberson steal = TDQ_CPU(cpu); 89862fa74d9SJeff Roberson mask &= ~(1 << cpu); 8997fcf154aSJeff Roberson tdq_lock_pair(tdq, steal); 90062fa74d9SJeff Roberson if (steal->tdq_load < thresh || steal->tdq_transferable == 0) { 9017fcf154aSJeff Roberson tdq_unlock_pair(tdq, steal); 90262fa74d9SJeff Roberson continue; 90362fa74d9SJeff Roberson } 90462fa74d9SJeff Roberson /* 90562fa74d9SJeff Roberson * If a thread was added while interrupts were disabled don't 90662fa74d9SJeff Roberson * steal one here. If we fail to acquire one due to affinity 90762fa74d9SJeff Roberson * restrictions loop again with this cpu removed from the 90862fa74d9SJeff Roberson * set. 
90962fa74d9SJeff Roberson */ 91062fa74d9SJeff Roberson if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) { 91162fa74d9SJeff Roberson tdq_unlock_pair(tdq, steal); 91262fa74d9SJeff Roberson continue; 91380f86c9fSJeff Roberson } 914ae7a6b38SJeff Roberson spinlock_exit(); 915ae7a6b38SJeff Roberson TDQ_UNLOCK(steal); 916ae7a6b38SJeff Roberson mi_switch(SW_VOL, NULL); 917ae7a6b38SJeff Roberson thread_unlock(curthread); 9187b8bfa0dSJeff Roberson 9197b8bfa0dSJeff Roberson return (0); 92022bf7d9aSJeff Roberson } 92162fa74d9SJeff Roberson spinlock_exit(); 92262fa74d9SJeff Roberson return (1); 92362fa74d9SJeff Roberson } 92422bf7d9aSJeff Roberson 925ae7a6b38SJeff Roberson /* 926ae7a6b38SJeff Roberson * Notify a remote cpu of new work. Sends an IPI if criteria are met. 927ae7a6b38SJeff Roberson */ 92822bf7d9aSJeff Roberson static void 929ff256d9cSJeff Roberson tdq_notify(struct tdq *tdq, struct td_sched *ts) 93022bf7d9aSJeff Roberson { 931fc3a97dcSJeff Roberson int cpri; 932fc3a97dcSJeff Roberson int pri; 9337b8bfa0dSJeff Roberson int cpu; 93422bf7d9aSJeff Roberson 935ff256d9cSJeff Roberson if (tdq->tdq_ipipending) 936ff256d9cSJeff Roberson return; 9377b8bfa0dSJeff Roberson cpu = ts->ts_cpu; 938fc3a97dcSJeff Roberson pri = ts->ts_thread->td_priority; 939ff256d9cSJeff Roberson cpri = pcpu_find(cpu)->pc_curthread->td_priority; 940ff256d9cSJeff Roberson if (!sched_shouldpreempt(pri, cpri, 1)) 9416b2f763fSJeff Roberson return; 942ff256d9cSJeff Roberson tdq->tdq_ipipending = 1; 94314618990SJeff Roberson ipi_selected(1 << cpu, IPI_PREEMPT); 94422bf7d9aSJeff Roberson } 94522bf7d9aSJeff Roberson 946ae7a6b38SJeff Roberson /* 947ae7a6b38SJeff Roberson * Steals load from a timeshare queue. Honors the rotating queue head 948ae7a6b38SJeff Roberson * index. 949ae7a6b38SJeff Roberson */ 950ae7a6b38SJeff Roberson static struct td_sched * 95162fa74d9SJeff Roberson runq_steal_from(struct runq *rq, int cpu, u_char start) 952ae7a6b38SJeff Roberson { 953ae7a6b38SJeff Roberson struct td_sched *ts; 954ae7a6b38SJeff Roberson struct rqbits *rqb; 955ae7a6b38SJeff Roberson struct rqhead *rqh; 956ae7a6b38SJeff Roberson int first; 957ae7a6b38SJeff Roberson int bit; 958ae7a6b38SJeff Roberson int pri; 959ae7a6b38SJeff Roberson int i; 960ae7a6b38SJeff Roberson 961ae7a6b38SJeff Roberson rqb = &rq->rq_status; 962ae7a6b38SJeff Roberson bit = start & (RQB_BPW -1); 963ae7a6b38SJeff Roberson pri = 0; 964ae7a6b38SJeff Roberson first = 0; 965ae7a6b38SJeff Roberson again: 966ae7a6b38SJeff Roberson for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) { 967ae7a6b38SJeff Roberson if (rqb->rqb_bits[i] == 0) 968ae7a6b38SJeff Roberson continue; 969ae7a6b38SJeff Roberson if (bit != 0) { 970ae7a6b38SJeff Roberson for (pri = bit; pri < RQB_BPW; pri++) 971ae7a6b38SJeff Roberson if (rqb->rqb_bits[i] & (1ul << pri)) 972ae7a6b38SJeff Roberson break; 973ae7a6b38SJeff Roberson if (pri >= RQB_BPW) 974ae7a6b38SJeff Roberson continue; 975ae7a6b38SJeff Roberson } else 976ae7a6b38SJeff Roberson pri = RQB_FFS(rqb->rqb_bits[i]); 977ae7a6b38SJeff Roberson pri += (i << RQB_L2BPW); 978ae7a6b38SJeff Roberson rqh = &rq->rq_queues[pri]; 979ae7a6b38SJeff Roberson TAILQ_FOREACH(ts, rqh, ts_procq) { 98062fa74d9SJeff Roberson if (first && THREAD_CAN_MIGRATE(ts->ts_thread) && 98162fa74d9SJeff Roberson THREAD_CAN_SCHED(ts->ts_thread, cpu)) 982ae7a6b38SJeff Roberson return (ts); 983ae7a6b38SJeff Roberson first = 1; 984ae7a6b38SJeff Roberson } 985ae7a6b38SJeff Roberson } 986ae7a6b38SJeff Roberson if (start != 0) { 987ae7a6b38SJeff Roberson start = 0; 988ae7a6b38SJeff 
Roberson goto again; 989ae7a6b38SJeff Roberson } 990ae7a6b38SJeff Roberson 991ae7a6b38SJeff Roberson return (NULL); 992ae7a6b38SJeff Roberson } 993ae7a6b38SJeff Roberson 994ae7a6b38SJeff Roberson /* 995ae7a6b38SJeff Roberson * Steals load from a standard linear queue. 996ae7a6b38SJeff Roberson */ 997ad1e7d28SJulian Elischer static struct td_sched * 99862fa74d9SJeff Roberson runq_steal(struct runq *rq, int cpu) 99922bf7d9aSJeff Roberson { 100022bf7d9aSJeff Roberson struct rqhead *rqh; 100122bf7d9aSJeff Roberson struct rqbits *rqb; 1002ad1e7d28SJulian Elischer struct td_sched *ts; 100322bf7d9aSJeff Roberson int word; 100422bf7d9aSJeff Roberson int bit; 100522bf7d9aSJeff Roberson 100622bf7d9aSJeff Roberson rqb = &rq->rq_status; 100722bf7d9aSJeff Roberson for (word = 0; word < RQB_LEN; word++) { 100822bf7d9aSJeff Roberson if (rqb->rqb_bits[word] == 0) 100922bf7d9aSJeff Roberson continue; 101022bf7d9aSJeff Roberson for (bit = 0; bit < RQB_BPW; bit++) { 1011a2640c9bSPeter Wemm if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 101222bf7d9aSJeff Roberson continue; 101322bf7d9aSJeff Roberson rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 101428994a58SJeff Roberson TAILQ_FOREACH(ts, rqh, ts_procq) 101562fa74d9SJeff Roberson if (THREAD_CAN_MIGRATE(ts->ts_thread) && 101662fa74d9SJeff Roberson THREAD_CAN_SCHED(ts->ts_thread, cpu)) 1017ad1e7d28SJulian Elischer return (ts); 101822bf7d9aSJeff Roberson } 101922bf7d9aSJeff Roberson } 102022bf7d9aSJeff Roberson return (NULL); 102122bf7d9aSJeff Roberson } 102222bf7d9aSJeff Roberson 1023ae7a6b38SJeff Roberson /* 1024ae7a6b38SJeff Roberson * Attempt to steal a thread in priority order from a thread queue. 1025ae7a6b38SJeff Roberson */ 1026ad1e7d28SJulian Elischer static struct td_sched * 102762fa74d9SJeff Roberson tdq_steal(struct tdq *tdq, int cpu) 102822bf7d9aSJeff Roberson { 1029ad1e7d28SJulian Elischer struct td_sched *ts; 103022bf7d9aSJeff Roberson 1031ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 103262fa74d9SJeff Roberson if ((ts = runq_steal(&tdq->tdq_realtime, cpu)) != NULL) 1033ad1e7d28SJulian Elischer return (ts); 103462fa74d9SJeff Roberson if ((ts = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx)) 103562fa74d9SJeff Roberson != NULL) 1036ad1e7d28SJulian Elischer return (ts); 103762fa74d9SJeff Roberson return (runq_steal(&tdq->tdq_idle, cpu)); 103822bf7d9aSJeff Roberson } 103980f86c9fSJeff Roberson 1040ae7a6b38SJeff Roberson /* 1041ae7a6b38SJeff Roberson * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the 10427fcf154aSJeff Roberson * current lock and returns with the assigned queue locked. 1043ae7a6b38SJeff Roberson */ 1044ae7a6b38SJeff Roberson static inline struct tdq * 1045ae7a6b38SJeff Roberson sched_setcpu(struct td_sched *ts, int cpu, int flags) 104680f86c9fSJeff Roberson { 1047ae7a6b38SJeff Roberson struct thread *td; 1048ae7a6b38SJeff Roberson struct tdq *tdq; 104980f86c9fSJeff Roberson 1050ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 1051ae7a6b38SJeff Roberson 1052ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpu); 1053ae7a6b38SJeff Roberson td = ts->ts_thread; 1054ae7a6b38SJeff Roberson ts->ts_cpu = cpu; 1055c47f202bSJeff Roberson 1056c47f202bSJeff Roberson /* If the lock matches just return the queue. 
*/ 1057ae7a6b38SJeff Roberson if (td->td_lock == TDQ_LOCKPTR(tdq)) 1058ae7a6b38SJeff Roberson return (tdq); 1059ae7a6b38SJeff Roberson #ifdef notyet 106080f86c9fSJeff Roberson /* 1061a5423ea3SJeff Roberson * If the thread isn't running its lockptr is a 1062ae7a6b38SJeff Roberson * turnstile or a sleepqueue. We can just lock_set without 1063ae7a6b38SJeff Roberson * blocking. 1064670c524fSJeff Roberson */ 1065ae7a6b38SJeff Roberson if (TD_CAN_RUN(td)) { 1066ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1067ae7a6b38SJeff Roberson thread_lock_set(td, TDQ_LOCKPTR(tdq)); 1068ae7a6b38SJeff Roberson return (tdq); 1069ae7a6b38SJeff Roberson } 1070ae7a6b38SJeff Roberson #endif 107180f86c9fSJeff Roberson /* 1072ae7a6b38SJeff Roberson * The hard case, migration, we need to block the thread first to 1073ae7a6b38SJeff Roberson * prevent order reversals with other cpus locks. 10747b8bfa0dSJeff Roberson */ 1075ae7a6b38SJeff Roberson thread_lock_block(td); 1076ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1077ae7a6b38SJeff Roberson thread_lock_unblock(td, TDQ_LOCKPTR(tdq)); 1078ae7a6b38SJeff Roberson return (tdq); 107980f86c9fSJeff Roberson } 10802454aaf5SJeff Roberson 1081ae7a6b38SJeff Roberson static int 1082ae7a6b38SJeff Roberson sched_pickcpu(struct td_sched *ts, int flags) 1083ae7a6b38SJeff Roberson { 108462fa74d9SJeff Roberson struct cpu_group *cg; 108562fa74d9SJeff Roberson struct thread *td; 1086ae7a6b38SJeff Roberson struct tdq *tdq; 108762fa74d9SJeff Roberson cpumask_t mask; 10887b8bfa0dSJeff Roberson int self; 10897b8bfa0dSJeff Roberson int pri; 10907b8bfa0dSJeff Roberson int cpu; 10917b8bfa0dSJeff Roberson 109262fa74d9SJeff Roberson self = PCPU_GET(cpuid); 109362fa74d9SJeff Roberson td = ts->ts_thread; 10947b8bfa0dSJeff Roberson if (smp_started == 0) 10957b8bfa0dSJeff Roberson return (self); 109628994a58SJeff Roberson /* 109728994a58SJeff Roberson * Don't migrate a running thread from sched_switch(). 109828994a58SJeff Roberson */ 109962fa74d9SJeff Roberson if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td)) 110062fa74d9SJeff Roberson return (ts->ts_cpu); 11017b8bfa0dSJeff Roberson /* 110262fa74d9SJeff Roberson * Prefer to run interrupt threads on the processors that generate 110362fa74d9SJeff Roberson * the interrupt. 11047b8bfa0dSJeff Roberson */ 110562fa74d9SJeff Roberson if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) && 110662fa74d9SJeff Roberson curthread->td_intr_nesting_level) 110762fa74d9SJeff Roberson ts->ts_cpu = self; 110862fa74d9SJeff Roberson /* 110962fa74d9SJeff Roberson * If the thread can run on the last cpu and the affinity has not 111062fa74d9SJeff Roberson * expired or it is idle run it there. 111162fa74d9SJeff Roberson */ 111262fa74d9SJeff Roberson pri = td->td_priority; 111362fa74d9SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 111462fa74d9SJeff Roberson if (THREAD_CAN_SCHED(td, ts->ts_cpu)) { 111562fa74d9SJeff Roberson if (tdq->tdq_lowpri > PRI_MIN_IDLE) 111662fa74d9SJeff Roberson return (ts->ts_cpu); 111762fa74d9SJeff Roberson if (SCHED_AFFINITY(ts, CG_SHARE_L2) && tdq->tdq_lowpri > pri) 11187b8bfa0dSJeff Roberson return (ts->ts_cpu); 11197b8bfa0dSJeff Roberson } 11207b8bfa0dSJeff Roberson /* 112162fa74d9SJeff Roberson * Search for the highest level in the tree that still has affinity. 
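 *
 * The SCHED_AFFINITY() test used in this walk compares ts_rltick (the last
 * tick the thread actually ran) against a window of cg_level * affinity
 * ticks.  As an illustration with assumed values: with hz == 1000 and
 * affinity at its default of max(1, hz / 1000) == 1, a level whose
 * cg_level is 2 only counts as still warm if the thread ran within roughly
 * the last 2 ticks; stepping toward cg_parent widens the window, on the
 * theory that larger shared caches keep the thread's footprint warm longer.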
11227b8bfa0dSJeff Roberson */ 112362fa74d9SJeff Roberson cg = NULL; 112462fa74d9SJeff Roberson for (cg = tdq->tdq_cg; cg != NULL; cg = cg->cg_parent) 112562fa74d9SJeff Roberson if (SCHED_AFFINITY(ts, cg->cg_level)) 112662fa74d9SJeff Roberson break; 112762fa74d9SJeff Roberson cpu = -1; 112862fa74d9SJeff Roberson mask = td->td_cpuset->cs_mask.__bits[0]; 112962fa74d9SJeff Roberson if (cg) 113062fa74d9SJeff Roberson cpu = sched_lowest(cg, mask, pri); 113162fa74d9SJeff Roberson if (cpu == -1) 113262fa74d9SJeff Roberson cpu = sched_lowest(cpu_top, mask, -1); 113362fa74d9SJeff Roberson /* 113462fa74d9SJeff Roberson * Compare the lowest loaded cpu to current cpu. 113562fa74d9SJeff Roberson */ 1136ff256d9cSJeff Roberson if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri && 1137ff256d9cSJeff Roberson TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE) 113862fa74d9SJeff Roberson cpu = self; 1139ff256d9cSJeff Roberson KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu.")); 1140ae7a6b38SJeff Roberson return (cpu); 114180f86c9fSJeff Roberson } 114262fa74d9SJeff Roberson #endif 114322bf7d9aSJeff Roberson 114422bf7d9aSJeff Roberson /* 114522bf7d9aSJeff Roberson * Pick the highest priority task we have and return it. 11460c0a98b2SJeff Roberson */ 1147ad1e7d28SJulian Elischer static struct td_sched * 1148ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq) 11495d7ef00cSJeff Roberson { 1150ad1e7d28SJulian Elischer struct td_sched *ts; 11515d7ef00cSJeff Roberson 1152ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1153e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_realtime); 1154dda713dfSJeff Roberson if (ts != NULL) 1155e7d50326SJeff Roberson return (ts); 11563f872f85SJeff Roberson ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 1157e7d50326SJeff Roberson if (ts != NULL) { 1158dda713dfSJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE, 1159e7d50326SJeff Roberson ("tdq_choose: Invalid priority on timeshare queue %d", 1160e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1161ad1e7d28SJulian Elischer return (ts); 116215dc847eSJeff Roberson } 116315dc847eSJeff Roberson 1164e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_idle); 1165e7d50326SJeff Roberson if (ts != NULL) { 1166e7d50326SJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE, 1167e7d50326SJeff Roberson ("tdq_choose: Invalid priority on idle queue %d", 1168e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1169e7d50326SJeff Roberson return (ts); 1170e7d50326SJeff Roberson } 1171e7d50326SJeff Roberson 1172e7d50326SJeff Roberson return (NULL); 1173245f3abfSJeff Roberson } 11740a016a05SJeff Roberson 1175ae7a6b38SJeff Roberson /* 1176ae7a6b38SJeff Roberson * Initialize a thread queue. 
1177ae7a6b38SJeff Roberson */
11780a016a05SJeff Roberson static void
1179ad1e7d28SJulian Elischer tdq_setup(struct tdq *tdq)
11800a016a05SJeff Roberson {
1181ae7a6b38SJeff Roberson 
1182c47f202bSJeff Roberson if (bootverbose)
1183c47f202bSJeff Roberson printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1184e7d50326SJeff Roberson runq_init(&tdq->tdq_realtime);
1185e7d50326SJeff Roberson runq_init(&tdq->tdq_timeshare);
1186d2ad694cSJeff Roberson runq_init(&tdq->tdq_idle);
118762fa74d9SJeff Roberson snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
118862fa74d9SJeff Roberson "sched lock %d", (int)TDQ_ID(tdq));
118962fa74d9SJeff Roberson mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
119062fa74d9SJeff Roberson MTX_SPIN | MTX_RECURSE);
11910a016a05SJeff Roberson }
11920a016a05SJeff Roberson 
1193c47f202bSJeff Roberson #ifdef SMP
1194c47f202bSJeff Roberson static void
1195c47f202bSJeff Roberson sched_setup_smp(void)
1196c47f202bSJeff Roberson {
1197c47f202bSJeff Roberson struct tdq *tdq;
1198c47f202bSJeff Roberson int i;
1199c47f202bSJeff Roberson 
120062fa74d9SJeff Roberson cpu_top = smp_topo();
120162fa74d9SJeff Roberson for (i = 0; i < MAXCPU; i++) {
1202c47f202bSJeff Roberson if (CPU_ABSENT(i))
1203c47f202bSJeff Roberson continue;
120462fa74d9SJeff Roberson tdq = TDQ_CPU(i);
1205c47f202bSJeff Roberson tdq_setup(tdq);
120662fa74d9SJeff Roberson tdq->tdq_cg = smp_topo_find(cpu_top, i);
120762fa74d9SJeff Roberson if (tdq->tdq_cg == NULL)
120862fa74d9SJeff Roberson panic("Can't find cpu group for %d\n", i);
1209c47f202bSJeff Roberson }
121062fa74d9SJeff Roberson balance_tdq = TDQ_SELF();
121162fa74d9SJeff Roberson sched_balance();
1212c47f202bSJeff Roberson }
1213c47f202bSJeff Roberson #endif
1214c47f202bSJeff Roberson 
1215ae7a6b38SJeff Roberson /*
1216ae7a6b38SJeff Roberson * Set up the thread queues and initialize the topology based on MD
1217ae7a6b38SJeff Roberson * information.
1218ae7a6b38SJeff Roberson */
121935e6168fSJeff Roberson static void
122035e6168fSJeff Roberson sched_setup(void *dummy)
122135e6168fSJeff Roberson {
1222ae7a6b38SJeff Roberson struct tdq *tdq;
1223c47f202bSJeff Roberson 
1224c47f202bSJeff Roberson tdq = TDQ_SELF();
12250ec896fdSJeff Roberson #ifdef SMP
1226c47f202bSJeff Roberson sched_setup_smp();
1227749d01b0SJeff Roberson #else
1228c47f202bSJeff Roberson tdq_setup(tdq);
1229356500a3SJeff Roberson #endif
1230ae7a6b38SJeff Roberson /*
1231ae7a6b38SJeff Roberson * To avoid divide-by-zero, we set realstathz to a dummy value
1232ae7a6b38SJeff Roberson * in case sched_clock() is called before sched_initticks().
1233ae7a6b38SJeff Roberson */
1234ae7a6b38SJeff Roberson realstathz = hz;
1235ae7a6b38SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */
1236ae7a6b38SJeff Roberson tickincr = 1 << SCHED_TICK_SHIFT;
1237ae7a6b38SJeff Roberson 
1238ae7a6b38SJeff Roberson /* Add thread0's load since it's running. */
1239ae7a6b38SJeff Roberson TDQ_LOCK(tdq);
1240c47f202bSJeff Roberson thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1241ae7a6b38SJeff Roberson tdq_load_add(tdq, &td_sched0);
124262fa74d9SJeff Roberson tdq->tdq_lowpri = thread0.td_priority;
1243ae7a6b38SJeff Roberson TDQ_UNLOCK(tdq);
124435e6168fSJeff Roberson }
124535e6168fSJeff Roberson 
1246ae7a6b38SJeff Roberson /*
1247ae7a6b38SJeff Roberson * This routine determines the tickincr after stathz and hz are set up.
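 *
 * Worked example (editor's illustration, not part of the original
 * source), assuming hz = 1000 and stathz = 128:
 *
 *	incr = (hz << SCHED_TICK_SHIFT) / realstathz
 *	     = (1000 << 10) / 128
 *	     = 8000
 *
 * so each stathz tick charged in sched_clock() adds 8000 fixed-point
 * units of run time, preserving the hz/stathz ratio without floating
 * point.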
1248ae7a6b38SJeff Roberson */ 1249a1d4fe69SDavid Xu /* ARGSUSED */ 1250a1d4fe69SDavid Xu static void 1251a1d4fe69SDavid Xu sched_initticks(void *dummy) 1252a1d4fe69SDavid Xu { 1253ae7a6b38SJeff Roberson int incr; 1254ae7a6b38SJeff Roberson 1255a1d4fe69SDavid Xu realstathz = stathz ? stathz : hz; 125614618990SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */ 1257a1d4fe69SDavid Xu 1258a1d4fe69SDavid Xu /* 1259e7d50326SJeff Roberson * tickincr is shifted out by 10 to avoid rounding errors due to 12603f872f85SJeff Roberson * hz not being evenly divisible by stathz on all platforms. 1261e7d50326SJeff Roberson */ 1262ae7a6b38SJeff Roberson incr = (hz << SCHED_TICK_SHIFT) / realstathz; 1263e7d50326SJeff Roberson /* 1264e7d50326SJeff Roberson * This does not work for values of stathz that are more than 1265e7d50326SJeff Roberson * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen. 1266a1d4fe69SDavid Xu */ 1267ae7a6b38SJeff Roberson if (incr == 0) 1268ae7a6b38SJeff Roberson incr = 1; 1269ae7a6b38SJeff Roberson tickincr = incr; 12707b8bfa0dSJeff Roberson #ifdef SMP 12719862717aSJeff Roberson /* 12727fcf154aSJeff Roberson * Set the default balance interval now that we know 12737fcf154aSJeff Roberson * what realstathz is. 12747fcf154aSJeff Roberson */ 12757fcf154aSJeff Roberson balance_interval = realstathz; 12767fcf154aSJeff Roberson /* 12779862717aSJeff Roberson * Set steal thresh to log2(mp_ncpu) but no greater than 4. This 12789862717aSJeff Roberson * prevents excess thrashing on large machines and excess idle on 12799862717aSJeff Roberson * smaller machines. 12809862717aSJeff Roberson */ 128162fa74d9SJeff Roberson steal_thresh = min(ffs(mp_ncpus) - 1, 3); 12827b8bfa0dSJeff Roberson affinity = SCHED_AFFINITY_DEFAULT; 12837b8bfa0dSJeff Roberson #endif 1284a1d4fe69SDavid Xu } 1285a1d4fe69SDavid Xu 1286a1d4fe69SDavid Xu 128735e6168fSJeff Roberson /* 1288ae7a6b38SJeff Roberson * This is the core of the interactivity algorithm. Determines a score based 1289ae7a6b38SJeff Roberson * on past behavior. It is the ratio of sleep time to run time scaled to 1290ae7a6b38SJeff Roberson * a [0, 100] integer. This is the voluntary sleep time of a process, which 1291ae7a6b38SJeff Roberson * differs from the cpu usage because it does not account for time spent 1292ae7a6b38SJeff Roberson * waiting on a run-queue. Would be prettier if we had floating point. 1293ae7a6b38SJeff Roberson */ 1294ae7a6b38SJeff Roberson static int 1295ae7a6b38SJeff Roberson sched_interact_score(struct thread *td) 1296ae7a6b38SJeff Roberson { 1297ae7a6b38SJeff Roberson struct td_sched *ts; 1298ae7a6b38SJeff Roberson int div; 1299ae7a6b38SJeff Roberson 1300ae7a6b38SJeff Roberson ts = td->td_sched; 1301ae7a6b38SJeff Roberson /* 1302ae7a6b38SJeff Roberson * The score is only needed if this is likely to be an interactive 1303ae7a6b38SJeff Roberson * task. Don't go through the expense of computing it if there's 1304ae7a6b38SJeff Roberson * no chance. 
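 *
 * Worked example (editor's illustration, not part of the original
 * source), assuming SCHED_INTERACT_HALF is 50: a thread with
 * ts_slptime = 400 and ts_runtime = 100 scores 100 / (400 / 50) = 12,
 * i.e. interactive, while a thread with the two values swapped scores
 * 50 + (50 - 100 / 8) = 88, i.e. batch.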
1305ae7a6b38SJeff Roberson */ 1306ae7a6b38SJeff Roberson if (sched_interact <= SCHED_INTERACT_HALF && 1307ae7a6b38SJeff Roberson ts->ts_runtime >= ts->ts_slptime) 1308ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1309ae7a6b38SJeff Roberson 1310ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1311ae7a6b38SJeff Roberson div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF); 1312ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF + 1313ae7a6b38SJeff Roberson (SCHED_INTERACT_HALF - (ts->ts_slptime / div))); 1314ae7a6b38SJeff Roberson } 1315ae7a6b38SJeff Roberson if (ts->ts_slptime > ts->ts_runtime) { 1316ae7a6b38SJeff Roberson div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF); 1317ae7a6b38SJeff Roberson return (ts->ts_runtime / div); 1318ae7a6b38SJeff Roberson } 1319ae7a6b38SJeff Roberson /* runtime == slptime */ 1320ae7a6b38SJeff Roberson if (ts->ts_runtime) 1321ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1322ae7a6b38SJeff Roberson 1323ae7a6b38SJeff Roberson /* 1324ae7a6b38SJeff Roberson * This can happen if slptime and runtime are 0. 1325ae7a6b38SJeff Roberson */ 1326ae7a6b38SJeff Roberson return (0); 1327ae7a6b38SJeff Roberson 1328ae7a6b38SJeff Roberson } 1329ae7a6b38SJeff Roberson 1330ae7a6b38SJeff Roberson /* 133135e6168fSJeff Roberson * Scale the scheduling priority according to the "interactivity" of this 133235e6168fSJeff Roberson * process. 133335e6168fSJeff Roberson */ 133415dc847eSJeff Roberson static void 13358460a577SJohn Birrell sched_priority(struct thread *td) 133635e6168fSJeff Roberson { 1337e7d50326SJeff Roberson int score; 133835e6168fSJeff Roberson int pri; 133935e6168fSJeff Roberson 13408460a577SJohn Birrell if (td->td_pri_class != PRI_TIMESHARE) 134115dc847eSJeff Roberson return; 1342e7d50326SJeff Roberson /* 1343e7d50326SJeff Roberson * If the score is interactive we place the thread in the realtime 1344e7d50326SJeff Roberson * queue with a priority that is less than kernel and interrupt 1345e7d50326SJeff Roberson * priorities. These threads are not subject to nice restrictions. 1346e7d50326SJeff Roberson * 1347ae7a6b38SJeff Roberson * Scores greater than this are placed on the normal timeshare queue 1348e7d50326SJeff Roberson * where the priority is partially decided by the most recent cpu 1349e7d50326SJeff Roberson * utilization and the rest is decided by nice value. 1350a5423ea3SJeff Roberson * 1351a5423ea3SJeff Roberson * The nice value of the process has a linear effect on the calculated 1352a5423ea3SJeff Roberson * score. Negative nice values make it easier for a thread to be 1353a5423ea3SJeff Roberson * considered interactive. 
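 *
 * Illustrative example (editor's note, not part of the original
 * source), assuming the default interactivity threshold of 30: an
 * adjusted score of 15 lands roughly halfway through the realtime
 * range computed below, while a score of 30 or more falls through to
 * the timeshare calculation, where SCHED_PRI_TICKS() adds recent cpu
 * usage and SCHED_PRI_NICE() adds the nice value.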
1354e7d50326SJeff Roberson */
1355e270652bSJeff Roberson score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1356e7d50326SJeff Roberson if (score < sched_interact) {
1357e7d50326SJeff Roberson pri = PRI_MIN_REALTIME;
1358e7d50326SJeff Roberson pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1359e7d50326SJeff Roberson * score;
1360e7d50326SJeff Roberson KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
13619a93305aSJeff Roberson ("sched_priority: invalid interactive priority %d score %d",
13629a93305aSJeff Roberson pri, score));
1363e7d50326SJeff Roberson } else {
1364e7d50326SJeff Roberson pri = SCHED_PRI_MIN;
1365e7d50326SJeff Roberson if (td->td_sched->ts_ticks)
1366e7d50326SJeff Roberson pri += SCHED_PRI_TICKS(td->td_sched);
1367e7d50326SJeff Roberson pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1368ae7a6b38SJeff Roberson KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
1369ae7a6b38SJeff Roberson ("sched_priority: invalid priority %d: nice %d, "
1370ae7a6b38SJeff Roberson "ticks %d ftick %d ltick %d tick pri %d",
1371ae7a6b38SJeff Roberson pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1372ae7a6b38SJeff Roberson td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1373ae7a6b38SJeff Roberson SCHED_PRI_TICKS(td->td_sched)));
1374e7d50326SJeff Roberson }
13758460a577SJohn Birrell sched_user_prio(td, pri);
137635e6168fSJeff Roberson 
137715dc847eSJeff Roberson return;
137835e6168fSJeff Roberson }
137935e6168fSJeff Roberson 
138035e6168fSJeff Roberson /*
1381d322132cSJeff Roberson * This routine enforces a maximum limit on the amount of scheduling history
1382ae7a6b38SJeff Roberson * kept. It is called after either the slptime or runtime is adjusted. This
1383ae7a6b38SJeff Roberson * function is ugly due to integer math.
1384d322132cSJeff Roberson */
13854b60e324SJeff Roberson static void
13868460a577SJohn Birrell sched_interact_update(struct thread *td)
13874b60e324SJeff Roberson {
1388155b6ca1SJeff Roberson struct td_sched *ts;
13899a93305aSJeff Roberson u_int sum;
13903f741ca1SJeff Roberson 
1391155b6ca1SJeff Roberson ts = td->td_sched;
1392ae7a6b38SJeff Roberson sum = ts->ts_runtime + ts->ts_slptime;
1393d322132cSJeff Roberson if (sum < SCHED_SLP_RUN_MAX)
1394d322132cSJeff Roberson return;
1395d322132cSJeff Roberson /*
1396155b6ca1SJeff Roberson * This only happens from two places:
1397155b6ca1SJeff Roberson * 1) We have added an unusual amount of run time from fork_exit.
1398155b6ca1SJeff Roberson * 2) We have added an unusual amount of sleep time from sched_sleep().
1399155b6ca1SJeff Roberson */
1400155b6ca1SJeff Roberson if (sum > SCHED_SLP_RUN_MAX * 2) {
1401ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) {
1402ae7a6b38SJeff Roberson ts->ts_runtime = SCHED_SLP_RUN_MAX;
1403ae7a6b38SJeff Roberson ts->ts_slptime = 1;
1404155b6ca1SJeff Roberson } else {
1405ae7a6b38SJeff Roberson ts->ts_slptime = SCHED_SLP_RUN_MAX;
1406ae7a6b38SJeff Roberson ts->ts_runtime = 1;
1407155b6ca1SJeff Roberson }
1408155b6ca1SJeff Roberson return;
1409155b6ca1SJeff Roberson }
1410155b6ca1SJeff Roberson /*
1411d322132cSJeff Roberson * If we have exceeded by more than 1/5th then the algorithm below
1412d322132cSJeff Roberson * will not bring us back into range.
Dividing by two here forces 14132454aaf5SJeff Roberson * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1414d322132cSJeff Roberson */ 141537a35e4aSJeff Roberson if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { 1416ae7a6b38SJeff Roberson ts->ts_runtime /= 2; 1417ae7a6b38SJeff Roberson ts->ts_slptime /= 2; 1418d322132cSJeff Roberson return; 1419d322132cSJeff Roberson } 1420ae7a6b38SJeff Roberson ts->ts_runtime = (ts->ts_runtime / 5) * 4; 1421ae7a6b38SJeff Roberson ts->ts_slptime = (ts->ts_slptime / 5) * 4; 1422d322132cSJeff Roberson } 1423d322132cSJeff Roberson 1424ae7a6b38SJeff Roberson /* 1425ae7a6b38SJeff Roberson * Scale back the interactivity history when a child thread is created. The 1426ae7a6b38SJeff Roberson * history is inherited from the parent but the thread may behave totally 1427ae7a6b38SJeff Roberson * differently. For example, a shell spawning a compiler process. We want 1428ae7a6b38SJeff Roberson * to learn that the compiler is behaving badly very quickly. 1429ae7a6b38SJeff Roberson */ 1430d322132cSJeff Roberson static void 14318460a577SJohn Birrell sched_interact_fork(struct thread *td) 1432d322132cSJeff Roberson { 1433d322132cSJeff Roberson int ratio; 1434d322132cSJeff Roberson int sum; 1435d322132cSJeff Roberson 1436ae7a6b38SJeff Roberson sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime; 1437d322132cSJeff Roberson if (sum > SCHED_SLP_RUN_FORK) { 1438d322132cSJeff Roberson ratio = sum / SCHED_SLP_RUN_FORK; 1439ae7a6b38SJeff Roberson td->td_sched->ts_runtime /= ratio; 1440ae7a6b38SJeff Roberson td->td_sched->ts_slptime /= ratio; 14414b60e324SJeff Roberson } 14424b60e324SJeff Roberson } 14434b60e324SJeff Roberson 144415dc847eSJeff Roberson /* 1445ae7a6b38SJeff Roberson * Called from proc0_init() to setup the scheduler fields. 1446ed062c8dSJulian Elischer */ 1447ed062c8dSJulian Elischer void 1448ed062c8dSJulian Elischer schedinit(void) 1449ed062c8dSJulian Elischer { 1450e7d50326SJeff Roberson 1451ed062c8dSJulian Elischer /* 1452ed062c8dSJulian Elischer * Set up the scheduler specific parts of proc0. 1453ed062c8dSJulian Elischer */ 1454ed062c8dSJulian Elischer proc0.p_sched = NULL; /* XXX */ 1455ad1e7d28SJulian Elischer thread0.td_sched = &td_sched0; 1456e7d50326SJeff Roberson td_sched0.ts_ltick = ticks; 14578ab80cf0SJeff Roberson td_sched0.ts_ftick = ticks; 1458ad1e7d28SJulian Elischer td_sched0.ts_thread = &thread0; 145973daf66fSJeff Roberson td_sched0.ts_slice = sched_slice; 1460ed062c8dSJulian Elischer } 1461ed062c8dSJulian Elischer 1462ed062c8dSJulian Elischer /* 146315dc847eSJeff Roberson * This is only somewhat accurate since given many processes of the same 146415dc847eSJeff Roberson * priority they will switch when their slices run out, which will be 1465e7d50326SJeff Roberson * at most sched_slice stathz ticks. 146615dc847eSJeff Roberson */ 146735e6168fSJeff Roberson int 146835e6168fSJeff Roberson sched_rr_interval(void) 146935e6168fSJeff Roberson { 1470e7d50326SJeff Roberson 1471e7d50326SJeff Roberson /* Convert sched_slice to hz */ 1472e7d50326SJeff Roberson return (hz/(realstathz/sched_slice)); 147335e6168fSJeff Roberson } 147435e6168fSJeff Roberson 1475ae7a6b38SJeff Roberson /* 1476ae7a6b38SJeff Roberson * Update the percent cpu tracking information when it is requested or 1477ae7a6b38SJeff Roberson * the total history exceeds the maximum. We keep a sliding history of 1478ae7a6b38SJeff Roberson * tick counts that slowly decays. 
This is less precise than the 4BSD 1479ae7a6b38SJeff Roberson * mechanism since it happens with less regular and frequent events. 1480ae7a6b38SJeff Roberson */ 148122bf7d9aSJeff Roberson static void 1482ad1e7d28SJulian Elischer sched_pctcpu_update(struct td_sched *ts) 148335e6168fSJeff Roberson { 1484e7d50326SJeff Roberson 1485e7d50326SJeff Roberson if (ts->ts_ticks == 0) 1486e7d50326SJeff Roberson return; 14878ab80cf0SJeff Roberson if (ticks - (hz / 10) < ts->ts_ltick && 14888ab80cf0SJeff Roberson SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 14898ab80cf0SJeff Roberson return; 149035e6168fSJeff Roberson /* 149135e6168fSJeff Roberson * Adjust counters and watermark for pctcpu calc. 1492210491d3SJeff Roberson */ 1493e7d50326SJeff Roberson if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1494ad1e7d28SJulian Elischer ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1495e7d50326SJeff Roberson SCHED_TICK_TARG; 1496e7d50326SJeff Roberson else 1497ad1e7d28SJulian Elischer ts->ts_ticks = 0; 1498ad1e7d28SJulian Elischer ts->ts_ltick = ticks; 1499e7d50326SJeff Roberson ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 150035e6168fSJeff Roberson } 150135e6168fSJeff Roberson 1502ae7a6b38SJeff Roberson /* 1503ae7a6b38SJeff Roberson * Adjust the priority of a thread. Move it to the appropriate run-queue 1504ae7a6b38SJeff Roberson * if necessary. This is the back-end for several priority related 1505ae7a6b38SJeff Roberson * functions. 1506ae7a6b38SJeff Roberson */ 1507e7d50326SJeff Roberson static void 1508f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio) 150935e6168fSJeff Roberson { 1510ad1e7d28SJulian Elischer struct td_sched *ts; 151173daf66fSJeff Roberson struct tdq *tdq; 151273daf66fSJeff Roberson int oldpri; 151335e6168fSJeff Roberson 151481d47d3fSJeff Roberson CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1515431f8906SJulian Elischer td, td->td_name, td->td_priority, prio, curthread, 1516431f8906SJulian Elischer curthread->td_name); 1517ad1e7d28SJulian Elischer ts = td->td_sched; 15187b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1519f5c157d9SJohn Baldwin if (td->td_priority == prio) 1520f5c157d9SJohn Baldwin return; 15213f741ca1SJeff Roberson /* 15223f741ca1SJeff Roberson * If the priority has been elevated due to priority 15233f741ca1SJeff Roberson * propagation, we may have to move ourselves to a new 1524e7d50326SJeff Roberson * queue. This could be optimized to not re-add in some 1525e7d50326SJeff Roberson * cases. 1526f2b74cbfSJeff Roberson */ 15276d55b3ecSJeff Roberson if (TD_ON_RUNQ(td) && prio < td->td_priority) { 1528e7d50326SJeff Roberson sched_rem(td); 1529e7d50326SJeff Roberson td->td_priority = prio; 1530ae7a6b38SJeff Roberson sched_add(td, SRQ_BORROWING); 153173daf66fSJeff Roberson return; 153273daf66fSJeff Roberson } 15336d55b3ecSJeff Roberson /* 15346d55b3ecSJeff Roberson * If the thread is currently running we may have to adjust the lowpri 15356d55b3ecSJeff Roberson * information so other cpus are aware of our current priority. 
15366d55b3ecSJeff Roberson */ 15376d55b3ecSJeff Roberson if (TD_IS_RUNNING(td)) { 1538ae7a6b38SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 153962fa74d9SJeff Roberson oldpri = td->td_priority; 15403f741ca1SJeff Roberson td->td_priority = prio; 154162fa74d9SJeff Roberson if (prio < tdq->tdq_lowpri) 154262fa74d9SJeff Roberson tdq->tdq_lowpri = prio; 154362fa74d9SJeff Roberson else if (tdq->tdq_lowpri == oldpri) 154462fa74d9SJeff Roberson tdq_setlowpri(tdq, td); 15456d55b3ecSJeff Roberson return; 154673daf66fSJeff Roberson } 15476d55b3ecSJeff Roberson td->td_priority = prio; 1548ae7a6b38SJeff Roberson } 154935e6168fSJeff Roberson 1550f5c157d9SJohn Baldwin /* 1551f5c157d9SJohn Baldwin * Update a thread's priority when it is lent another thread's 1552f5c157d9SJohn Baldwin * priority. 1553f5c157d9SJohn Baldwin */ 1554f5c157d9SJohn Baldwin void 1555f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio) 1556f5c157d9SJohn Baldwin { 1557f5c157d9SJohn Baldwin 1558f5c157d9SJohn Baldwin td->td_flags |= TDF_BORROWING; 1559f5c157d9SJohn Baldwin sched_thread_priority(td, prio); 1560f5c157d9SJohn Baldwin } 1561f5c157d9SJohn Baldwin 1562f5c157d9SJohn Baldwin /* 1563f5c157d9SJohn Baldwin * Restore a thread's priority when priority propagation is 1564f5c157d9SJohn Baldwin * over. The prio argument is the minimum priority the thread 1565f5c157d9SJohn Baldwin * needs to have to satisfy other possible priority lending 1566f5c157d9SJohn Baldwin * requests. If the thread's regular priority is less 1567f5c157d9SJohn Baldwin * important than prio, the thread will keep a priority boost 1568f5c157d9SJohn Baldwin * of prio. 1569f5c157d9SJohn Baldwin */ 1570f5c157d9SJohn Baldwin void 1571f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio) 1572f5c157d9SJohn Baldwin { 1573f5c157d9SJohn Baldwin u_char base_pri; 1574f5c157d9SJohn Baldwin 1575f5c157d9SJohn Baldwin if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1576f5c157d9SJohn Baldwin td->td_base_pri <= PRI_MAX_TIMESHARE) 15778460a577SJohn Birrell base_pri = td->td_user_pri; 1578f5c157d9SJohn Baldwin else 1579f5c157d9SJohn Baldwin base_pri = td->td_base_pri; 1580f5c157d9SJohn Baldwin if (prio >= base_pri) { 1581f5c157d9SJohn Baldwin td->td_flags &= ~TDF_BORROWING; 1582f5c157d9SJohn Baldwin sched_thread_priority(td, base_pri); 1583f5c157d9SJohn Baldwin } else 1584f5c157d9SJohn Baldwin sched_lend_prio(td, prio); 1585f5c157d9SJohn Baldwin } 1586f5c157d9SJohn Baldwin 1587ae7a6b38SJeff Roberson /* 1588ae7a6b38SJeff Roberson * Standard entry for setting the priority to an absolute value. 1589ae7a6b38SJeff Roberson */ 1590f5c157d9SJohn Baldwin void 1591f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio) 1592f5c157d9SJohn Baldwin { 1593f5c157d9SJohn Baldwin u_char oldprio; 1594f5c157d9SJohn Baldwin 1595f5c157d9SJohn Baldwin /* First, update the base priority. */ 1596f5c157d9SJohn Baldwin td->td_base_pri = prio; 1597f5c157d9SJohn Baldwin 1598f5c157d9SJohn Baldwin /* 159950aaa791SJohn Baldwin * If the thread is borrowing another thread's priority, don't 1600f5c157d9SJohn Baldwin * ever lower the priority. 1601f5c157d9SJohn Baldwin */ 1602f5c157d9SJohn Baldwin if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1603f5c157d9SJohn Baldwin return; 1604f5c157d9SJohn Baldwin 1605f5c157d9SJohn Baldwin /* Change the real priority. 
*/
1606f5c157d9SJohn Baldwin oldprio = td->td_priority;
1607f5c157d9SJohn Baldwin sched_thread_priority(td, prio);
1608f5c157d9SJohn Baldwin 
1609f5c157d9SJohn Baldwin /*
1610f5c157d9SJohn Baldwin * If the thread is on a turnstile, then let the turnstile update
1611f5c157d9SJohn Baldwin * its state.
1612f5c157d9SJohn Baldwin */
1613f5c157d9SJohn Baldwin if (TD_ON_LOCK(td) && oldprio != prio)
1614f5c157d9SJohn Baldwin turnstile_adjust(td, oldprio);
1615f5c157d9SJohn Baldwin }
1616f5c157d9SJohn Baldwin 
1617ae7a6b38SJeff Roberson /*
1618ae7a6b38SJeff Roberson * Set the base user priority; this does not affect the current running priority.
1619ae7a6b38SJeff Roberson */
162035e6168fSJeff Roberson void
16218460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio)
16223db720fdSDavid Xu {
16233db720fdSDavid Xu u_char oldprio;
16243db720fdSDavid Xu 
16258460a577SJohn Birrell td->td_base_user_pri = prio;
1626fc6c30f6SJulian Elischer if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1627fc6c30f6SJulian Elischer return;
16288460a577SJohn Birrell oldprio = td->td_user_pri;
16298460a577SJohn Birrell td->td_user_pri = prio;
16303db720fdSDavid Xu }
16313db720fdSDavid Xu 
16323db720fdSDavid Xu void
16333db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio)
16343db720fdSDavid Xu {
16353db720fdSDavid Xu u_char oldprio;
16363db720fdSDavid Xu 
1637435806d3SDavid Xu THREAD_LOCK_ASSERT(td, MA_OWNED);
16383db720fdSDavid Xu td->td_flags |= TDF_UBORROWING;
1639f645b5daSMaxim Konovalov oldprio = td->td_user_pri;
16408460a577SJohn Birrell td->td_user_pri = prio;
16413db720fdSDavid Xu }
16423db720fdSDavid Xu 
16433db720fdSDavid Xu void
16443db720fdSDavid Xu sched_unlend_user_prio(struct thread *td, u_char prio)
16453db720fdSDavid Xu {
16463db720fdSDavid Xu u_char base_pri;
16473db720fdSDavid Xu 
1648435806d3SDavid Xu THREAD_LOCK_ASSERT(td, MA_OWNED);
16498460a577SJohn Birrell base_pri = td->td_base_user_pri;
16503db720fdSDavid Xu if (prio >= base_pri) {
16513db720fdSDavid Xu td->td_flags &= ~TDF_UBORROWING;
16528460a577SJohn Birrell sched_user_prio(td, base_pri);
1653435806d3SDavid Xu } else {
16543db720fdSDavid Xu sched_lend_user_prio(td, prio);
16553db720fdSDavid Xu }
1656435806d3SDavid Xu }
16573db720fdSDavid Xu 
1658ae7a6b38SJeff Roberson /*
1659731016feSWojciech A. Koszek * Block a thread for switching. Similar to thread_block() but does not
1660731016feSWojciech A. Koszek * bump the spin count.
1661731016feSWojciech A. Koszek */
1662731016feSWojciech A. Koszek static inline struct mtx *
1663731016feSWojciech A. Koszek thread_block_switch(struct thread *td)
1664731016feSWojciech A. Koszek {
1665731016feSWojciech A. Koszek struct mtx *lock;
1666731016feSWojciech A. Koszek 
1667731016feSWojciech A. Koszek THREAD_LOCK_ASSERT(td, MA_OWNED);
1668731016feSWojciech A. Koszek lock = td->td_lock;
1669731016feSWojciech A. Koszek td->td_lock = &blocked_lock;
1670731016feSWojciech A. Koszek mtx_unlock_spin(lock);
1671731016feSWojciech A. Koszek 
1672731016feSWojciech A. Koszek return (lock);
1673731016feSWojciech A. Koszek }
1674731016feSWojciech A. Koszek 
1675731016feSWojciech A. Koszek /*
1676c47f202bSJeff Roberson * Handle migration from sched_switch(). This happens only for
1677c47f202bSJeff Roberson * cpu binding.
1678c47f202bSJeff Roberson */ 1679c47f202bSJeff Roberson static struct mtx * 1680c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 1681c47f202bSJeff Roberson { 1682c47f202bSJeff Roberson struct tdq *tdn; 1683c47f202bSJeff Roberson 1684c47f202bSJeff Roberson tdn = TDQ_CPU(td->td_sched->ts_cpu); 1685c47f202bSJeff Roberson #ifdef SMP 168673daf66fSJeff Roberson tdq_load_rem(tdq, td->td_sched); 1687c47f202bSJeff Roberson /* 1688c47f202bSJeff Roberson * Do the lock dance required to avoid LOR. We grab an extra 1689c47f202bSJeff Roberson * spinlock nesting to prevent preemption while we're 1690c47f202bSJeff Roberson * not holding either run-queue lock. 1691c47f202bSJeff Roberson */ 1692c47f202bSJeff Roberson spinlock_enter(); 1693c47f202bSJeff Roberson thread_block_switch(td); /* This releases the lock on tdq. */ 1694c47f202bSJeff Roberson TDQ_LOCK(tdn); 1695c47f202bSJeff Roberson tdq_add(tdn, td, flags); 1696ff256d9cSJeff Roberson tdq_notify(tdn, td->td_sched); 1697c47f202bSJeff Roberson /* 1698c47f202bSJeff Roberson * After we unlock tdn the new cpu still can't switch into this 1699c47f202bSJeff Roberson * thread until we've unblocked it in cpu_switch(). The lock 1700c47f202bSJeff Roberson * pointers may match in the case of HTT cores. Don't unlock here 1701c47f202bSJeff Roberson * or we can deadlock when the other CPU runs the IPI handler. 1702c47f202bSJeff Roberson */ 1703c47f202bSJeff Roberson if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) { 1704c47f202bSJeff Roberson TDQ_UNLOCK(tdn); 1705c47f202bSJeff Roberson TDQ_LOCK(tdq); 1706c47f202bSJeff Roberson } 1707c47f202bSJeff Roberson spinlock_exit(); 1708c47f202bSJeff Roberson #endif 1709c47f202bSJeff Roberson return (TDQ_LOCKPTR(tdn)); 1710c47f202bSJeff Roberson } 1711c47f202bSJeff Roberson 1712c47f202bSJeff Roberson /* 1713ae7a6b38SJeff Roberson * Release a thread that was blocked with thread_block_switch(). 1714ae7a6b38SJeff Roberson */ 1715ae7a6b38SJeff Roberson static inline void 1716ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx) 1717ae7a6b38SJeff Roberson { 1718ae7a6b38SJeff Roberson atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 1719ae7a6b38SJeff Roberson (uintptr_t)mtx); 1720ae7a6b38SJeff Roberson } 1721ae7a6b38SJeff Roberson 1722ae7a6b38SJeff Roberson /* 1723ae7a6b38SJeff Roberson * Switch threads. This function has to handle threads coming in while 1724ae7a6b38SJeff Roberson * blocked for some reason, running, or idle. It also must deal with 1725ae7a6b38SJeff Roberson * migrating a thread from one queue to another as running threads may 1726ae7a6b38SJeff Roberson * be assigned elsewhere via binding. 
1727ae7a6b38SJeff Roberson */ 17283db720fdSDavid Xu void 17293389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags) 173035e6168fSJeff Roberson { 1731c02bbb43SJeff Roberson struct tdq *tdq; 1732ad1e7d28SJulian Elischer struct td_sched *ts; 1733ae7a6b38SJeff Roberson struct mtx *mtx; 1734c47f202bSJeff Roberson int srqflag; 1735ae7a6b38SJeff Roberson int cpuid; 173635e6168fSJeff Roberson 17377b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 17386d55b3ecSJeff Roberson KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument")); 173935e6168fSJeff Roberson 1740ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1741ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1742e7d50326SJeff Roberson ts = td->td_sched; 1743c47f202bSJeff Roberson mtx = td->td_lock; 1744ae7a6b38SJeff Roberson ts->ts_rltick = ticks; 1745060563ecSJulian Elischer td->td_lastcpu = td->td_oncpu; 1746060563ecSJulian Elischer td->td_oncpu = NOCPU; 174752eb8464SJohn Baldwin td->td_flags &= ~TDF_NEEDRESCHED; 174877918643SStephan Uphoff td->td_owepreempt = 0; 1749b11fdad0SJeff Roberson /* 1750ae7a6b38SJeff Roberson * The lock pointer in an idle thread should never change. Reset it 1751ae7a6b38SJeff Roberson * to CAN_RUN as well. 1752b11fdad0SJeff Roberson */ 1753486a9414SJulian Elischer if (TD_IS_IDLETHREAD(td)) { 1754ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1755bf0acc27SJohn Baldwin TD_SET_CAN_RUN(td); 17567b20fb19SJeff Roberson } else if (TD_IS_RUNNING(td)) { 1757ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1758c47f202bSJeff Roberson srqflag = (flags & SW_PREEMPT) ? 1759598b368dSJeff Roberson SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1760c47f202bSJeff Roberson SRQ_OURSELF|SRQ_YIELDING; 1761c47f202bSJeff Roberson if (ts->ts_cpu == cpuid) 176273daf66fSJeff Roberson tdq_runq_add(tdq, ts, srqflag); 1763c47f202bSJeff Roberson else 1764c47f202bSJeff Roberson mtx = sched_switch_migrate(tdq, td, srqflag); 1765ae7a6b38SJeff Roberson } else { 1766ae7a6b38SJeff Roberson /* This thread must be going to sleep. */ 1767ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1768ae7a6b38SJeff Roberson mtx = thread_block_switch(td); 1769ae7a6b38SJeff Roberson tdq_load_rem(tdq, ts); 1770ae7a6b38SJeff Roberson } 1771ae7a6b38SJeff Roberson /* 1772ae7a6b38SJeff Roberson * We enter here with the thread blocked and assigned to the 1773ae7a6b38SJeff Roberson * appropriate cpu run-queue or sleep-queue and with the current 1774ae7a6b38SJeff Roberson * thread-queue locked. 1775ae7a6b38SJeff Roberson */ 1776ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 17772454aaf5SJeff Roberson newtd = choosethread(); 1778ae7a6b38SJeff Roberson /* 1779ae7a6b38SJeff Roberson * Call the MD code to switch contexts if necessary. 1780ae7a6b38SJeff Roberson */ 1781ebccf1e3SJoseph Koshy if (td != newtd) { 1782ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1783ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1784ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1785ebccf1e3SJoseph Koshy #endif 1786eea4f254SJeff Roberson lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); 178759c68134SJeff Roberson TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 1788ae7a6b38SJeff Roberson cpu_switch(td, newtd, mtx); 1789ae7a6b38SJeff Roberson /* 1790ae7a6b38SJeff Roberson * We may return from cpu_switch on a different cpu. However, 1791ae7a6b38SJeff Roberson * we always return with td_lock pointing to the current cpu's 1792ae7a6b38SJeff Roberson * run queue lock. 
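 *
 * For illustration (editor's note, not part of the original source): a
 * thread that called sched_bind() for cpu 2 while executing on cpu 0
 * blocks here and is next resumed by cpu 2's switch path, so the cpuid
 * and tdq fetched below refer to cpu 2 even though this invocation of
 * sched_switch() began on cpu 0.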
1793ae7a6b38SJeff Roberson */ 1794ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1795ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1796eea4f254SJeff Roberson lock_profile_obtain_lock_success( 1797eea4f254SJeff Roberson &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); 1798ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1799ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1800ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1801ebccf1e3SJoseph Koshy #endif 1802ae7a6b38SJeff Roberson } else 1803ae7a6b38SJeff Roberson thread_unblock_switch(td, mtx); 1804ae7a6b38SJeff Roberson /* 1805ae7a6b38SJeff Roberson * Assert that all went well and return. 1806ae7a6b38SJeff Roberson */ 1807ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); 1808ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1809ae7a6b38SJeff Roberson td->td_oncpu = cpuid; 181035e6168fSJeff Roberson } 181135e6168fSJeff Roberson 1812ae7a6b38SJeff Roberson /* 1813ae7a6b38SJeff Roberson * Adjust thread priorities as a result of a nice request. 1814ae7a6b38SJeff Roberson */ 181535e6168fSJeff Roberson void 1816fa885116SJulian Elischer sched_nice(struct proc *p, int nice) 181735e6168fSJeff Roberson { 181835e6168fSJeff Roberson struct thread *td; 181935e6168fSJeff Roberson 1820fa885116SJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 1821e7d50326SJeff Roberson 1822fa885116SJulian Elischer p->p_nice = nice; 18238460a577SJohn Birrell FOREACH_THREAD_IN_PROC(p, td) { 18247b20fb19SJeff Roberson thread_lock(td); 18258460a577SJohn Birrell sched_priority(td); 1826e7d50326SJeff Roberson sched_prio(td, td->td_base_user_pri); 18277b20fb19SJeff Roberson thread_unlock(td); 182835e6168fSJeff Roberson } 1829fa885116SJulian Elischer } 183035e6168fSJeff Roberson 1831ae7a6b38SJeff Roberson /* 1832ae7a6b38SJeff Roberson * Record the sleep time for the interactivity scorer. 1833ae7a6b38SJeff Roberson */ 183435e6168fSJeff Roberson void 1835c5aa6b58SJeff Roberson sched_sleep(struct thread *td, int prio) 183635e6168fSJeff Roberson { 1837e7d50326SJeff Roberson 18387b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 183935e6168fSJeff Roberson 184054b0e65fSJeff Roberson td->td_slptick = ticks; 1841c5aa6b58SJeff Roberson if (TD_IS_SUSPENDED(td) || prio <= PSOCK) 1842c5aa6b58SJeff Roberson td->td_flags |= TDF_CANSWAP; 1843c5aa6b58SJeff Roberson if (static_boost && prio) 1844c5aa6b58SJeff Roberson sched_prio(td, prio); 184535e6168fSJeff Roberson } 184635e6168fSJeff Roberson 1847ae7a6b38SJeff Roberson /* 1848ae7a6b38SJeff Roberson * Schedule a thread to resume execution and record how long it voluntarily 1849ae7a6b38SJeff Roberson * slept. We also update the pctcpu, interactivity, and priority. 1850ae7a6b38SJeff Roberson */ 185135e6168fSJeff Roberson void 185235e6168fSJeff Roberson sched_wakeup(struct thread *td) 185335e6168fSJeff Roberson { 185414618990SJeff Roberson struct td_sched *ts; 1855ae7a6b38SJeff Roberson int slptick; 1856e7d50326SJeff Roberson 18577b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 185814618990SJeff Roberson ts = td->td_sched; 1859c5aa6b58SJeff Roberson td->td_flags &= ~TDF_CANSWAP; 186035e6168fSJeff Roberson /* 1861e7d50326SJeff Roberson * If we slept for more than a tick update our interactivity and 1862e7d50326SJeff Roberson * priority. 
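 *
 * Worked example (editor's illustration, not part of the original
 * source): a thread woken 250 ticks after sched_sleep() recorded
 * td_slptick adds 250 << SCHED_TICK_SHIFT to ts_slptime below, then
 * sched_interact_update() clamps the combined history and
 * sched_pctcpu_update() decays the %cpu window before the thread is
 * queued again with a fresh slice.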
186335e6168fSJeff Roberson */ 186454b0e65fSJeff Roberson slptick = td->td_slptick; 186554b0e65fSJeff Roberson td->td_slptick = 0; 1866ae7a6b38SJeff Roberson if (slptick && slptick != ticks) { 18679a93305aSJeff Roberson u_int hzticks; 1868f1e8dc4aSJeff Roberson 1869ae7a6b38SJeff Roberson hzticks = (ticks - slptick) << SCHED_TICK_SHIFT; 1870ae7a6b38SJeff Roberson ts->ts_slptime += hzticks; 18718460a577SJohn Birrell sched_interact_update(td); 187214618990SJeff Roberson sched_pctcpu_update(ts); 1873f1e8dc4aSJeff Roberson } 187414618990SJeff Roberson /* Reset the slice value after we sleep. */ 187514618990SJeff Roberson ts->ts_slice = sched_slice; 18767a5e5e2aSJeff Roberson sched_add(td, SRQ_BORING); 187735e6168fSJeff Roberson } 187835e6168fSJeff Roberson 187935e6168fSJeff Roberson /* 188035e6168fSJeff Roberson * Penalize the parent for creating a new child and initialize the child's 188135e6168fSJeff Roberson * priority. 188235e6168fSJeff Roberson */ 188335e6168fSJeff Roberson void 18848460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child) 188515dc847eSJeff Roberson { 18867b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1887ad1e7d28SJulian Elischer sched_fork_thread(td, child); 1888e7d50326SJeff Roberson /* 1889e7d50326SJeff Roberson * Penalize the parent and child for forking. 1890e7d50326SJeff Roberson */ 1891e7d50326SJeff Roberson sched_interact_fork(child); 1892e7d50326SJeff Roberson sched_priority(child); 1893ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr; 1894e7d50326SJeff Roberson sched_interact_update(td); 1895e7d50326SJeff Roberson sched_priority(td); 1896ad1e7d28SJulian Elischer } 1897ad1e7d28SJulian Elischer 1898ae7a6b38SJeff Roberson /* 1899ae7a6b38SJeff Roberson * Fork a new thread, may be within the same process. 1900ae7a6b38SJeff Roberson */ 1901ad1e7d28SJulian Elischer void 1902ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child) 1903ad1e7d28SJulian Elischer { 1904ad1e7d28SJulian Elischer struct td_sched *ts; 1905ad1e7d28SJulian Elischer struct td_sched *ts2; 19068460a577SJohn Birrell 19078b16c208SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1908e7d50326SJeff Roberson /* 1909e7d50326SJeff Roberson * Initialize child. 1910e7d50326SJeff Roberson */ 1911ad1e7d28SJulian Elischer ts = td->td_sched; 1912ad1e7d28SJulian Elischer ts2 = child->td_sched; 19138b16c208SJeff Roberson child->td_lock = TDQ_LOCKPTR(TDQ_SELF()); 19148b16c208SJeff Roberson child->td_cpuset = cpuset_ref(td->td_cpuset); 19158b16c208SJeff Roberson ts2->ts_thread = child; 1916ad1e7d28SJulian Elischer ts2->ts_cpu = ts->ts_cpu; 19178b16c208SJeff Roberson ts2->ts_flags = 0; 1918e7d50326SJeff Roberson /* 1919e7d50326SJeff Roberson * Grab our parents cpu estimation information and priority. 1920e7d50326SJeff Roberson */ 1921ad1e7d28SJulian Elischer ts2->ts_ticks = ts->ts_ticks; 1922ad1e7d28SJulian Elischer ts2->ts_ltick = ts->ts_ltick; 1923ad1e7d28SJulian Elischer ts2->ts_ftick = ts->ts_ftick; 1924e7d50326SJeff Roberson child->td_user_pri = td->td_user_pri; 1925e7d50326SJeff Roberson child->td_base_user_pri = td->td_base_user_pri; 1926e7d50326SJeff Roberson /* 1927e7d50326SJeff Roberson * And update interactivity score. 1928e7d50326SJeff Roberson */ 1929ae7a6b38SJeff Roberson ts2->ts_slptime = ts->ts_slptime; 1930ae7a6b38SJeff Roberson ts2->ts_runtime = ts->ts_runtime; 1931e7d50326SJeff Roberson ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. 
*/ 193215dc847eSJeff Roberson } 193315dc847eSJeff Roberson 1934ae7a6b38SJeff Roberson /* 1935ae7a6b38SJeff Roberson * Adjust the priority class of a thread. 1936ae7a6b38SJeff Roberson */ 193715dc847eSJeff Roberson void 19388460a577SJohn Birrell sched_class(struct thread *td, int class) 193915dc847eSJeff Roberson { 194015dc847eSJeff Roberson 19417b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 19428460a577SJohn Birrell if (td->td_pri_class == class) 194315dc847eSJeff Roberson return; 19448460a577SJohn Birrell td->td_pri_class = class; 194535e6168fSJeff Roberson } 194635e6168fSJeff Roberson 194735e6168fSJeff Roberson /* 194835e6168fSJeff Roberson * Return some of the child's priority and interactivity to the parent. 194935e6168fSJeff Roberson */ 195035e6168fSJeff Roberson void 1951fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child) 195235e6168fSJeff Roberson { 1953e7d50326SJeff Roberson struct thread *td; 1954141ad61cSJeff Roberson 19558460a577SJohn Birrell CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 1956431f8906SJulian Elischer child, child->td_name, child->td_priority); 19578460a577SJohn Birrell 1958374ae2a3SJeff Roberson PROC_LOCK_ASSERT(p, MA_OWNED); 1959e7d50326SJeff Roberson td = FIRST_THREAD_IN_PROC(p); 1960e7d50326SJeff Roberson sched_exit_thread(td, child); 1961ad1e7d28SJulian Elischer } 1962ad1e7d28SJulian Elischer 1963ae7a6b38SJeff Roberson /* 1964ae7a6b38SJeff Roberson * Penalize another thread for the time spent on this one. This helps to 1965ae7a6b38SJeff Roberson * worsen the priority and interactivity of processes which schedule batch 1966ae7a6b38SJeff Roberson * jobs such as make. This has little effect on the make process itself but 1967ae7a6b38SJeff Roberson * causes new processes spawned by it to receive worse scores immediately. 1968ae7a6b38SJeff Roberson */ 1969ad1e7d28SJulian Elischer void 1970fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child) 1971ad1e7d28SJulian Elischer { 1972fc6c30f6SJulian Elischer 1973e7d50326SJeff Roberson CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 1974431f8906SJulian Elischer child, child->td_name, child->td_priority); 1975e7d50326SJeff Roberson 1976e7d50326SJeff Roberson /* 1977e7d50326SJeff Roberson * Give the child's runtime to the parent without returning the 1978e7d50326SJeff Roberson * sleep time as a penalty to the parent. This causes shells that 1979e7d50326SJeff Roberson * launch expensive things to mark their children as expensive. 
1980e7d50326SJeff Roberson */ 19817b20fb19SJeff Roberson thread_lock(td); 1982ae7a6b38SJeff Roberson td->td_sched->ts_runtime += child->td_sched->ts_runtime; 1983fc6c30f6SJulian Elischer sched_interact_update(td); 1984e7d50326SJeff Roberson sched_priority(td); 19857b20fb19SJeff Roberson thread_unlock(td); 1986ad1e7d28SJulian Elischer } 1987ad1e7d28SJulian Elischer 1988ff256d9cSJeff Roberson void 1989ff256d9cSJeff Roberson sched_preempt(struct thread *td) 1990ff256d9cSJeff Roberson { 1991ff256d9cSJeff Roberson struct tdq *tdq; 1992ff256d9cSJeff Roberson 1993ff256d9cSJeff Roberson thread_lock(td); 1994ff256d9cSJeff Roberson tdq = TDQ_SELF(); 1995ff256d9cSJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1996ff256d9cSJeff Roberson tdq->tdq_ipipending = 0; 1997ff256d9cSJeff Roberson if (td->td_priority > tdq->tdq_lowpri) { 1998ff256d9cSJeff Roberson if (td->td_critnest > 1) 1999ff256d9cSJeff Roberson td->td_owepreempt = 1; 2000ff256d9cSJeff Roberson else 2001ff256d9cSJeff Roberson mi_switch(SW_INVOL | SW_PREEMPT, NULL); 2002ff256d9cSJeff Roberson } 2003ff256d9cSJeff Roberson thread_unlock(td); 2004ff256d9cSJeff Roberson } 2005ff256d9cSJeff Roberson 2006ae7a6b38SJeff Roberson /* 2007ae7a6b38SJeff Roberson * Fix priorities on return to user-space. Priorities may be elevated due 2008ae7a6b38SJeff Roberson * to static priorities in msleep() or similar. 2009ae7a6b38SJeff Roberson */ 2010ad1e7d28SJulian Elischer void 2011ad1e7d28SJulian Elischer sched_userret(struct thread *td) 2012ad1e7d28SJulian Elischer { 2013ad1e7d28SJulian Elischer /* 2014ad1e7d28SJulian Elischer * XXX we cheat slightly on the locking here to avoid locking in 2015ad1e7d28SJulian Elischer * the usual case. Setting td_priority here is essentially an 2016ad1e7d28SJulian Elischer * incomplete workaround for not setting it properly elsewhere. 2017ad1e7d28SJulian Elischer * Now that some interrupt handlers are threads, not setting it 2018ad1e7d28SJulian Elischer * properly elsewhere can clobber it in the window between setting 2019ad1e7d28SJulian Elischer * it here and returning to user mode, so don't waste time setting 2020ad1e7d28SJulian Elischer * it perfectly here. 2021ad1e7d28SJulian Elischer */ 2022ad1e7d28SJulian Elischer KASSERT((td->td_flags & TDF_BORROWING) == 0, 2023ad1e7d28SJulian Elischer ("thread with borrowed priority returning to userland")); 2024ad1e7d28SJulian Elischer if (td->td_priority != td->td_user_pri) { 20257b20fb19SJeff Roberson thread_lock(td); 2026ad1e7d28SJulian Elischer td->td_priority = td->td_user_pri; 2027ad1e7d28SJulian Elischer td->td_base_pri = td->td_user_pri; 202862fa74d9SJeff Roberson tdq_setlowpri(TDQ_SELF(), td); 20297b20fb19SJeff Roberson thread_unlock(td); 2030ad1e7d28SJulian Elischer } 203135e6168fSJeff Roberson } 203235e6168fSJeff Roberson 2033ae7a6b38SJeff Roberson /* 2034ae7a6b38SJeff Roberson * Handle a stathz tick. This is really only relevant for timeshare 2035ae7a6b38SJeff Roberson * threads. 2036ae7a6b38SJeff Roberson */ 203735e6168fSJeff Roberson void 20387cf90fb3SJeff Roberson sched_clock(struct thread *td) 203935e6168fSJeff Roberson { 2040ad1e7d28SJulian Elischer struct tdq *tdq; 2041ad1e7d28SJulian Elischer struct td_sched *ts; 204235e6168fSJeff Roberson 2043ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 20443f872f85SJeff Roberson tdq = TDQ_SELF(); 20457fcf154aSJeff Roberson #ifdef SMP 20467fcf154aSJeff Roberson /* 20477fcf154aSJeff Roberson * We run the long term load balancer infrequently on the first cpu. 
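 *
 * Editor's note (illustrative, not part of the original source):
 * balance_tdq was set to the boot cpu's queue in sched_setup_smp() and
 * balance_interval defaults to realstathz in sched_initticks(), so the
 * countdown below fires sched_balance() roughly once a second on that
 * cpu.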
20487fcf154aSJeff Roberson */
20497fcf154aSJeff Roberson if (balance_tdq == tdq) {
20507fcf154aSJeff Roberson if (balance_ticks && --balance_ticks == 0)
20517fcf154aSJeff Roberson sched_balance();
20527fcf154aSJeff Roberson }
20537fcf154aSJeff Roberson #endif
20543f872f85SJeff Roberson /*
20553f872f85SJeff Roberson * Advance the insert index once for each tick to ensure that all
20563f872f85SJeff Roberson * threads get a chance to run.
20573f872f85SJeff Roberson */
20583f872f85SJeff Roberson if (tdq->tdq_idx == tdq->tdq_ridx) {
20593f872f85SJeff Roberson tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
20603f872f85SJeff Roberson if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
20613f872f85SJeff Roberson tdq->tdq_ridx = tdq->tdq_idx;
20623f872f85SJeff Roberson }
20633f872f85SJeff Roberson ts = td->td_sched;
2064fd0b8c78SJeff Roberson if (td->td_pri_class & PRI_FIFO_BIT)
2065a8949de2SJeff Roberson return;
2066fd0b8c78SJeff Roberson if (td->td_pri_class == PRI_TIMESHARE) {
2067a8949de2SJeff Roberson /*
2068fd0b8c78SJeff Roberson * We used a tick; charge it to the thread so
2069fd0b8c78SJeff Roberson * that we can compute our interactivity.
207015dc847eSJeff Roberson */
2071ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr;
20728460a577SJohn Birrell sched_interact_update(td);
207373daf66fSJeff Roberson sched_priority(td);
2074fd0b8c78SJeff Roberson }
207535e6168fSJeff Roberson /*
207635e6168fSJeff Roberson * We used up one time slice.
207735e6168fSJeff Roberson */
2078ad1e7d28SJulian Elischer if (--ts->ts_slice > 0)
207915dc847eSJeff Roberson return;
208035e6168fSJeff Roberson /*
208173daf66fSJeff Roberson * We're out of time; force a requeue at userret().
208235e6168fSJeff Roberson */
208373daf66fSJeff Roberson ts->ts_slice = sched_slice;
20844a338afdSJulian Elischer td->td_flags |= TDF_NEEDRESCHED;
208535e6168fSJeff Roberson }
208635e6168fSJeff Roberson 
2087ae7a6b38SJeff Roberson /*
2088ae7a6b38SJeff Roberson * Called once per hz tick. Used for cpu utilization information. This
2089ae7a6b38SJeff Roberson * is easier than trying to scale based on stathz.
2090ae7a6b38SJeff Roberson */
2091ae7a6b38SJeff Roberson void
2092ae7a6b38SJeff Roberson sched_tick(void)
2093ae7a6b38SJeff Roberson {
2094ae7a6b38SJeff Roberson struct td_sched *ts;
2095ae7a6b38SJeff Roberson 
2096ae7a6b38SJeff Roberson ts = curthread->td_sched;
2097ae7a6b38SJeff Roberson /* Adjust ticks for pctcpu */
2098ae7a6b38SJeff Roberson ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2099ae7a6b38SJeff Roberson ts->ts_ltick = ticks;
2100ae7a6b38SJeff Roberson /*
2101ae7a6b38SJeff Roberson * Update if we've exceeded our desired tick threshold by over one
2102ae7a6b38SJeff Roberson * second.
2103ae7a6b38SJeff Roberson */
2104ae7a6b38SJeff Roberson if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2105ae7a6b38SJeff Roberson sched_pctcpu_update(ts);
2106ae7a6b38SJeff Roberson }
2107ae7a6b38SJeff Roberson 
2108ae7a6b38SJeff Roberson /*
2109ae7a6b38SJeff Roberson * Return whether the current CPU has runnable tasks. Used for in-kernel
2110ae7a6b38SJeff Roberson * cooperative idle threads.
2111ae7a6b38SJeff Roberson */ 211235e6168fSJeff Roberson int 211335e6168fSJeff Roberson sched_runnable(void) 211435e6168fSJeff Roberson { 2115ad1e7d28SJulian Elischer struct tdq *tdq; 2116b90816f1SJeff Roberson int load; 211735e6168fSJeff Roberson 2118b90816f1SJeff Roberson load = 1; 2119b90816f1SJeff Roberson 2120ad1e7d28SJulian Elischer tdq = TDQ_SELF(); 21213f741ca1SJeff Roberson if ((curthread->td_flags & TDF_IDLETD) != 0) { 2122d2ad694cSJeff Roberson if (tdq->tdq_load > 0) 21233f741ca1SJeff Roberson goto out; 21243f741ca1SJeff Roberson } else 2125d2ad694cSJeff Roberson if (tdq->tdq_load - 1 > 0) 2126b90816f1SJeff Roberson goto out; 2127b90816f1SJeff Roberson load = 0; 2128b90816f1SJeff Roberson out: 2129b90816f1SJeff Roberson return (load); 213035e6168fSJeff Roberson } 213135e6168fSJeff Roberson 2132ae7a6b38SJeff Roberson /* 2133ae7a6b38SJeff Roberson * Choose the highest priority thread to run. The thread is removed from 2134ae7a6b38SJeff Roberson * the run-queue while running however the load remains. For SMP we set 2135ae7a6b38SJeff Roberson * the tdq in the global idle bitmask if it idles here. 2136ae7a6b38SJeff Roberson */ 21377a5e5e2aSJeff Roberson struct thread * 2138c9f25d8fSJeff Roberson sched_choose(void) 2139c9f25d8fSJeff Roberson { 2140ae7a6b38SJeff Roberson struct td_sched *ts; 2141ae7a6b38SJeff Roberson struct tdq *tdq; 2142ae7a6b38SJeff Roberson 2143ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2144ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2145ad1e7d28SJulian Elischer ts = tdq_choose(tdq); 2146ad1e7d28SJulian Elischer if (ts) { 2147c143ac21SJeff Roberson ts->ts_ltick = ticks; 2148ad1e7d28SJulian Elischer tdq_runq_rem(tdq, ts); 21497a5e5e2aSJeff Roberson return (ts->ts_thread); 215035e6168fSJeff Roberson } 215162fa74d9SJeff Roberson return (PCPU_GET(idlethread)); 21527a5e5e2aSJeff Roberson } 21537a5e5e2aSJeff Roberson 2154ae7a6b38SJeff Roberson /* 2155ae7a6b38SJeff Roberson * Set owepreempt if necessary. Preemption never happens directly in ULE, 2156ae7a6b38SJeff Roberson * we always request it once we exit a critical section. 2157ae7a6b38SJeff Roberson */ 2158ae7a6b38SJeff Roberson static inline void 2159ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td) 21607a5e5e2aSJeff Roberson { 21617a5e5e2aSJeff Roberson struct thread *ctd; 21627a5e5e2aSJeff Roberson int cpri; 21637a5e5e2aSJeff Roberson int pri; 21647a5e5e2aSJeff Roberson 2165ff256d9cSJeff Roberson THREAD_LOCK_ASSERT(curthread, MA_OWNED); 2166ff256d9cSJeff Roberson 21677a5e5e2aSJeff Roberson ctd = curthread; 21687a5e5e2aSJeff Roberson pri = td->td_priority; 21697a5e5e2aSJeff Roberson cpri = ctd->td_priority; 2170ff256d9cSJeff Roberson if (pri < cpri) 2171ff256d9cSJeff Roberson ctd->td_flags |= TDF_NEEDRESCHED; 21727a5e5e2aSJeff Roberson if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd)) 2173ae7a6b38SJeff Roberson return; 2174ff256d9cSJeff Roberson if (!sched_shouldpreempt(pri, cpri, 0)) 2175ae7a6b38SJeff Roberson return; 21767a5e5e2aSJeff Roberson ctd->td_owepreempt = 1; 217735e6168fSJeff Roberson } 217835e6168fSJeff Roberson 2179ae7a6b38SJeff Roberson /* 218073daf66fSJeff Roberson * Add a thread to a thread queue. Select the appropriate runq and add the 218173daf66fSJeff Roberson * thread to it. This is the internal function called when the tdq is 218273daf66fSJeff Roberson * predetermined. 
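 *
 * Editor's note (not part of the original source): callers such as
 * sched_add(), which picks the queue with sched_pickcpu() and locks it
 * via sched_setcpu(), and sched_switch_migrate(), which already knows
 * the destination for a migrating thread, enter here with the target
 * queue locked, which is what the assertion below checks.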
2183ae7a6b38SJeff Roberson */ 218435e6168fSJeff Roberson void 2185ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags) 218635e6168fSJeff Roberson { 2187ad1e7d28SJulian Elischer struct td_sched *ts; 2188c9f25d8fSJeff Roberson 2189ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 21907a5e5e2aSJeff Roberson KASSERT((td->td_inhibitors == 0), 21917a5e5e2aSJeff Roberson ("sched_add: trying to run inhibited thread")); 21927a5e5e2aSJeff Roberson KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 21937a5e5e2aSJeff Roberson ("sched_add: bad thread state")); 2194b61ce5b0SJeff Roberson KASSERT(td->td_flags & TDF_INMEM, 2195b61ce5b0SJeff Roberson ("sched_add: thread swapped out")); 2196ae7a6b38SJeff Roberson 2197ae7a6b38SJeff Roberson ts = td->td_sched; 2198ae7a6b38SJeff Roberson if (td->td_priority < tdq->tdq_lowpri) 2199ae7a6b38SJeff Roberson tdq->tdq_lowpri = td->td_priority; 2200ad1e7d28SJulian Elischer tdq_runq_add(tdq, ts, flags); 2201ad1e7d28SJulian Elischer tdq_load_add(tdq, ts); 2202ae7a6b38SJeff Roberson } 2203ae7a6b38SJeff Roberson 2204ae7a6b38SJeff Roberson /* 2205ae7a6b38SJeff Roberson * Select the target thread queue and add a thread to it. Request 2206ae7a6b38SJeff Roberson * preemption or IPI a remote processor if required. 2207ae7a6b38SJeff Roberson */ 2208ae7a6b38SJeff Roberson void 2209ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags) 2210ae7a6b38SJeff Roberson { 2211ae7a6b38SJeff Roberson struct tdq *tdq; 22127b8bfa0dSJeff Roberson #ifdef SMP 221373daf66fSJeff Roberson struct td_sched *ts; 2214ae7a6b38SJeff Roberson int cpu; 2215ae7a6b38SJeff Roberson #endif 2216ae7a6b38SJeff Roberson CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 2217431f8906SJulian Elischer td, td->td_name, td->td_priority, curthread, 2218431f8906SJulian Elischer curthread->td_name); 2219ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2220ae7a6b38SJeff Roberson /* 2221ae7a6b38SJeff Roberson * Recalculate the priority before we select the target cpu or 2222ae7a6b38SJeff Roberson * run-queue. 2223ae7a6b38SJeff Roberson */ 2224ae7a6b38SJeff Roberson if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 2225ae7a6b38SJeff Roberson sched_priority(td); 2226ae7a6b38SJeff Roberson #ifdef SMP 2227ae7a6b38SJeff Roberson /* 2228ae7a6b38SJeff Roberson * Pick the destination cpu and if it isn't ours transfer to the 2229ae7a6b38SJeff Roberson * target cpu. 2230ae7a6b38SJeff Roberson */ 223173daf66fSJeff Roberson ts = td->td_sched; 2232ae7a6b38SJeff Roberson cpu = sched_pickcpu(ts, flags); 2233ae7a6b38SJeff Roberson tdq = sched_setcpu(ts, cpu, flags); 2234ae7a6b38SJeff Roberson tdq_add(tdq, td, flags); 223573daf66fSJeff Roberson if (cpu != PCPU_GET(cpuid)) { 2236ff256d9cSJeff Roberson tdq_notify(tdq, ts); 22377b8bfa0dSJeff Roberson return; 22387b8bfa0dSJeff Roberson } 2239ae7a6b38SJeff Roberson #else 2240ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2241ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 2242ae7a6b38SJeff Roberson /* 2243ae7a6b38SJeff Roberson * Now that the thread is moving to the run-queue, set the lock 2244ae7a6b38SJeff Roberson * to the scheduler's lock. 
2245ae7a6b38SJeff Roberson */ 2246ae7a6b38SJeff Roberson thread_lock_set(td, TDQ_LOCKPTR(tdq)); 2247ae7a6b38SJeff Roberson tdq_add(tdq, td, flags); 22487b8bfa0dSJeff Roberson #endif 2249ae7a6b38SJeff Roberson if (!(flags & SRQ_YIELDING)) 2250ae7a6b38SJeff Roberson sched_setpreempt(td); 225135e6168fSJeff Roberson } 225235e6168fSJeff Roberson 2253ae7a6b38SJeff Roberson /* 2254ae7a6b38SJeff Roberson * Remove a thread from a run-queue without running it. This is used 2255ae7a6b38SJeff Roberson * when we're stealing a thread from a remote queue. Otherwise all threads 2256ae7a6b38SJeff Roberson * exit by calling sched_exit_thread() and sched_throw() themselves. 2257ae7a6b38SJeff Roberson */ 225835e6168fSJeff Roberson void 22597cf90fb3SJeff Roberson sched_rem(struct thread *td) 226035e6168fSJeff Roberson { 2261ad1e7d28SJulian Elischer struct tdq *tdq; 2262ad1e7d28SJulian Elischer struct td_sched *ts; 22637cf90fb3SJeff Roberson 226481d47d3fSJeff Roberson CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 2265431f8906SJulian Elischer td, td->td_name, td->td_priority, curthread, 2266431f8906SJulian Elischer curthread->td_name); 2267ad1e7d28SJulian Elischer ts = td->td_sched; 2268ae7a6b38SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 2269ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2270ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 22717a5e5e2aSJeff Roberson KASSERT(TD_ON_RUNQ(td), 2272ad1e7d28SJulian Elischer ("sched_rem: thread not on run queue")); 2273ad1e7d28SJulian Elischer tdq_runq_rem(tdq, ts); 2274ad1e7d28SJulian Elischer tdq_load_rem(tdq, ts); 22757a5e5e2aSJeff Roberson TD_SET_CAN_RUN(td); 227662fa74d9SJeff Roberson if (td->td_priority == tdq->tdq_lowpri) 227762fa74d9SJeff Roberson tdq_setlowpri(tdq, NULL); 227835e6168fSJeff Roberson } 227935e6168fSJeff Roberson 2280ae7a6b38SJeff Roberson /* 2281ae7a6b38SJeff Roberson * Fetch cpu utilization information. Updates on demand. 2282ae7a6b38SJeff Roberson */ 228335e6168fSJeff Roberson fixpt_t 22847cf90fb3SJeff Roberson sched_pctcpu(struct thread *td) 228535e6168fSJeff Roberson { 228635e6168fSJeff Roberson fixpt_t pctcpu; 2287ad1e7d28SJulian Elischer struct td_sched *ts; 228835e6168fSJeff Roberson 228935e6168fSJeff Roberson pctcpu = 0; 2290ad1e7d28SJulian Elischer ts = td->td_sched; 2291ad1e7d28SJulian Elischer if (ts == NULL) 2292484288deSJeff Roberson return (0); 229335e6168fSJeff Roberson 22947b20fb19SJeff Roberson thread_lock(td); 2295ad1e7d28SJulian Elischer if (ts->ts_ticks) { 229635e6168fSJeff Roberson int rtick; 229735e6168fSJeff Roberson 2298ad1e7d28SJulian Elischer sched_pctcpu_update(ts); 229935e6168fSJeff Roberson /* How many rtick per second ? */ 2300e7d50326SJeff Roberson rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 2301e7d50326SJeff Roberson pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 230235e6168fSJeff Roberson } 23037b20fb19SJeff Roberson thread_unlock(td); 230435e6168fSJeff Roberson 230535e6168fSJeff Roberson return (pctcpu); 230635e6168fSJeff Roberson } 230735e6168fSJeff Roberson 230862fa74d9SJeff Roberson /* 230962fa74d9SJeff Roberson * Enforce affinity settings for a thread. Called after adjustments to 231062fa74d9SJeff Roberson * cpumask. 
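 *
 * Illustrative example (editor's note, not part of the original
 * source): if cpuset_setaffinity() removes cpu 3 from the mask of a
 * thread currently running there, THREAD_CAN_SCHED() fails for ts_cpu,
 * so the code below marks TDF_NEEDRESCHED, picks a new cpu with
 * sched_pickcpu(), and, when called from another cpu, sends
 * IPI_PREEMPT to cpu 3 to force the switch.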
/*
 * Enforce affinity settings for a thread.  Called after adjustments to
 * cpumask.
 */
void
sched_affinity(struct thread *td)
{
#ifdef SMP
        struct td_sched *ts;
        int cpu;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        ts = td->td_sched;
        if (THREAD_CAN_SCHED(td, ts->ts_cpu))
                return;
        if (!TD_IS_RUNNING(td))
                return;
        td->td_flags |= TDF_NEEDRESCHED;
        if (!THREAD_CAN_MIGRATE(td))
                return;
        /*
         * Assign the new cpu and force a switch before returning to
         * userspace.  If the target thread is not running locally send
         * an ipi to force the issue.
         */
        cpu = ts->ts_cpu;
        ts->ts_cpu = sched_pickcpu(ts, 0);
        if (cpu != PCPU_GET(cpuid))
                ipi_selected(1 << cpu, IPI_PREEMPT);
#endif
}

/*
 * Bind a thread to a target cpu.
 */
void
sched_bind(struct thread *td, int cpu)
{
        struct td_sched *ts;

        THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
        ts = td->td_sched;
        if (ts->ts_flags & TSF_BOUND)
                sched_unbind(td);
        ts->ts_flags |= TSF_BOUND;
        sched_pin();
        if (PCPU_GET(cpuid) == cpu)
                return;
        ts->ts_cpu = cpu;
        /* When we return from mi_switch we'll be on the correct cpu. */
        mi_switch(SW_VOL, NULL);
}

/*
 * Release a bound thread.
 */
void
sched_unbind(struct thread *td)
{
        struct td_sched *ts;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        ts = td->td_sched;
        if ((ts->ts_flags & TSF_BOUND) == 0)
                return;
        ts->ts_flags &= ~TSF_BOUND;
        sched_unpin();
}

int
sched_is_bound(struct thread *td)
{
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        return (td->td_sched->ts_flags & TSF_BOUND);
}
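/*
 * Illustrative sketch (editor's addition, compiled out): the usual
 * bind/unbind pattern for a hypothetical caller that must execute on a
 * particular cpu.  sched_bind() may mi_switch() to migrate, so only
 * the thread lock is held across it, and it is dropped before the
 * per-cpu work runs.
 */
#if 0
static void
example_run_on_cpu(int cpu)
{
        struct thread *td;

        td = curthread;
        thread_lock(td);
        sched_bind(td, cpu);    /* May switch us onto 'cpu'. */
        thread_unlock(td);

        /* ... per-cpu work executes on 'cpu' here ... */

        thread_lock(td);
        sched_unbind(td);       /* Allow migration again. */
        thread_unlock(td);
}
#endif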
/*
 * Basic yield call.
 */
void
sched_relinquish(struct thread *td)
{
        thread_lock(td);
        SCHED_STAT_INC(switch_relinquish);
        mi_switch(SW_VOL, NULL);
        thread_unlock(td);
}

/*
 * Return the total system load.
 */
int
sched_load(void)
{
#ifdef SMP
        int total;
        int i;

        total = 0;
        for (i = 0; i <= mp_maxid; i++)
                total += TDQ_CPU(i)->tdq_sysload;
        return (total);
#else
        return (TDQ_SELF()->tdq_sysload);
#endif
}

int
sched_sizeof_proc(void)
{
        return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
        return (sizeof(struct thread) + sizeof(struct td_sched));
}

/*
 * The actual idle process.
 */
void
sched_idletd(void *dummy)
{
        struct thread *td;
        struct tdq *tdq;

        td = curthread;
        tdq = TDQ_SELF();
        mtx_assert(&Giant, MA_NOTOWNED);
        /* ULE relies on preemption for idle interruption. */
        for (;;) {
#ifdef SMP
                if (tdq_idled(tdq))
                        cpu_idle();
#else
                cpu_idle();
#endif
        }
}
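/*
 * Illustrative sketch (editor's addition, compiled out): sched_load()
 * is the value sampled by the load-average computation (loadav() in
 * kern/kern_synch.c).  It approximates the number of runnable threads,
 * since tdq_sysload excludes threads that do not contribute to load.
 */
#if 0
static void
example_sample_load(void)
{

        printf("runnable threads (approx): %d\n", sched_load());
}
#endif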
/*
 * A CPU is entering the scheduler for the first time or a thread is
 * exiting.
 */
void
sched_throw(struct thread *td)
{
        struct thread *newtd;
        struct tdq *tdq;

        tdq = TDQ_SELF();
        if (td == NULL) {
                /* Correct spinlock nesting and acquire the correct lock. */
                TDQ_LOCK(tdq);
                spinlock_exit();
        } else {
                MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
                tdq_load_rem(tdq, td->td_sched);
                lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
        }
        KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
        newtd = choosethread();
        /* Hand the run-queue lock directly to the chosen thread. */
        TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
        PCPU_SET(switchtime, cpu_ticks());
        PCPU_SET(switchticks, ticks);
        cpu_throw(td, newtd);           /* doesn't return */
}

/*
 * This is called from fork_exit().  Just acquire the correct locks and
 * let fork do the rest of the work.
 */
void
sched_fork_exit(struct thread *td)
{
        struct td_sched *ts;
        struct tdq *tdq;
        int cpuid;

        /*
         * Finish setting up thread glue so that it begins execution in a
         * non-nested critical section with the scheduler lock held.
         */
        cpuid = PCPU_GET(cpuid);
        tdq = TDQ_CPU(cpuid);
        ts = td->td_sched;
        if (TD_IS_IDLETHREAD(td))
                td->td_lock = TDQ_LOCKPTR(tdq);
        MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
        td->td_oncpu = cpuid;
        TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
        lock_profile_obtain_lock_success(
            &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
}
static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
    "Scheduler");
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
    "Scheduler name");
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
    "Slice size for timeshare threads");
SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
    "Interactivity score threshold");
SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
    0, "Min priority for preemption, lower priorities have greater precedence");
SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost,
    0, "Controls whether static kernel priorities are assigned to sleeping threads.");
#ifdef SMP
SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
    "Number of hz ticks to keep thread affinity for");
SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
    "Enables the long-term load balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
    &balance_interval, 0,
    "Average frequency in stathz ticks to run the long-term balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
    "Steals work from another hyper-threaded core on idle");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
    "Attempts to steal work from other cores before idling");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
    "Minimum load on remote cpu before we'll steal");
#endif

/* ps compat.  All cpu percentages from ULE are weighted. */
static int ccpu = 0;
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"
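/*
 * Illustrative sketch (editor's addition, compiled out): the sysctls
 * defined above are read from userland in the usual way.  This is a
 * stand-alone userland program, not kernel code; the tunable names
 * match the declarations above on a ULE kernel.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        char name[32];
        int slice;
        size_t len;

        len = sizeof(name);
        if (sysctlbyname("kern.sched.name", name, &len, NULL, 0) == 0)
                printf("scheduler: %s\n", name);
        len = sizeof(slice);
        if (sysctlbyname("kern.sched.slice", &slice, &len, NULL, 0) == 0)
                printf("slice: %d ticks\n", slice);
        return (0);
}
#endif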