/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file implements the ULE scheduler.  ULE supports independent CPU
 * run queues and fine grain locking.  It has superior interactive
 * performance under load even on uni-processor systems.
 *
 * etymology:
 *   ULE is the last three letters in schedule.  It owes its name to a
 * generic user created for a scheduling system by Paul Mikesell at
 * Isilon Systems and a general lack of creativity on the part of the author.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__)
#error "This architecture is not currently compatible with ULE"
#endif

#define	KTR_ULE	0

/*
 * Thread scheduler specific section.  All fields are protected
 * by the thread lock.
 */
struct td_sched {
	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
	struct thread	*ts_thread;	/* Active associated thread. */
	struct runq	*ts_runq;	/* Run-queue we're queued on. */
	short		ts_flags;	/* TSF_* flags. */
	u_char		ts_rqindex;	/* Run queue index. */
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	int		ts_slice;	/* Ticks of slice remaining. */
	u_int		ts_slptime;	/* Number of ticks we vol. slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	/* The following variables are only used for pctcpu calculation */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
	int		ts_rltick;	/* Real last tick, for affinity. */
};
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread can not migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
static struct td_sched td_sched0;

#define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)
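
/*
 * Worked example (illustrative only; assumes hz = 1000 and the stock
 * timeshare priority constants): a thread watched over the full
 * 10 second window has a SCHED_TICK_TOTAL() of about 10000.  If it
 * ran for 5 of those seconds, SCHED_TICK_HZ() yields roughly 5000,
 * and SCHED_PRI_TICKS() scales that by SCHED_PRI_RANGE / 10000,
 * placing the thread near the middle of the utilization dependent
 * range before SCHED_PRI_NICE() is added.
 */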
/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice;
#ifdef PREEMPTION
#ifdef FULL_PREEMPTION
static int preempt_thresh = PRI_MAX_IDLE;
#else
static int preempt_thresh = PRI_MIN_KERN;
#endif
#else
static int preempt_thresh = 0;
#endif

/*
 * tdq - per processor runqs and statistics.  All fields are protected by the
 * tdq_lock.  The load and lowpri may be accessed without it to avoid excess
 * locking in sched_pickcpu().
 */
struct tdq {
	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
	struct mtx	tdq_lock;		/* run queue lock. */
	struct runq	tdq_realtime;		/* real-time run queue. */
	struct runq	tdq_timeshare;		/* timeshare run queue. */
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	int		tdq_load;		/* Aggregate load. */
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
	u_char		tdq_idx;		/* Current insert index. */
	u_char		tdq_ridx;		/* Current removal index. */
	u_char		tdq_lowpri;		/* Lowest priority thread. */
	u_char		tdq_ipipending;		/* IPI pending. */
	int		tdq_transferable;	/* Transferable thread count. */
	char		tdq_name[sizeof("sched lock") + 6];
} __aligned(64);

#ifdef SMP
struct cpu_group *cpu_top;

#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
#define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))

/*
 * Run-time tunables.
 */
static int rebalance = 1;
static int balance_interval = 128;	/* Default set in sched_initticks(). */
static int affinity;
static int steal_htt = 1;
static int steal_idle = 1;
static int steal_thresh = 2;
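
/*
 * Illustrative note (assumes hz = 1000 and that sched_initticks()
 * leaves `affinity' at SCHED_AFFINITY_DEFAULT == max(1, 1000 / 1000)
 * == 1): SCHED_AFFINITY(ts, t) then asks whether the thread last ran
 * within the past t ticks, so larger cache-sharing levels passed as
 * `t' tolerate proportionally longer idle gaps before the thread's
 * cached state is presumed cold.
 */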

/*
 * One thread queue per processor.
 */
static struct tdq	tdq_cpu[MAXCPU];
static struct tdq	*balance_tdq;
static int balance_ticks;

#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
#define	TDQ_ID(x)	((int)((x) - tdq_cpu))
#else	/* !SMP */
static struct tdq	tdq_cpu;

#define	TDQ_ID(x)	(0)
#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
#endif

#define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
#define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCKPTR(t)		(&(t)->tdq_lock)

static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);

/* Operations on per processor queues */
static struct td_sched * tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct td_sched *);
static void tdq_load_rem(struct tdq *, struct td_sched *);
static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
static inline int sched_shouldpreempt(int, int, int);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
static void tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static int tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct tdq *, struct td_sched *);
static struct td_sched *tdq_steal(struct tdq *, int);
static struct td_sched *runq_steal(struct runq *, int);
static int sched_pickcpu(struct td_sched *, int);
static void sched_balance(void);
static int sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
static inline struct mtx *thread_block_switch(struct thread *);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)

/*
 * Print the threads waiting on a run-queue.
 */
static void
runq_print(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;
	int j;
	int i;

	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(ts, rqh, ts_procq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    ts->ts_thread,
					    ts->ts_thread->td_name,
					    ts->ts_thread->td_priority,
					    ts->ts_rqindex, pri);
				}
			}
	}
}

/*
 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
 */
void
tdq_print(int cpu)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);

	printf("tdq %d:\n", TDQ_ID(tdq));
	printf("\tlock %p\n", TDQ_LOCKPTR(tdq));
	printf("\tLock name: %s\n", tdq->tdq_name);
	printf("\tload: %d\n", tdq->tdq_load);
	printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
	printf("\trealtime runq:\n");
	runq_print(&tdq->tdq_realtime);
	printf("\ttimeshare runq:\n");
	runq_print(&tdq->tdq_timeshare);
	printf("\tidle runq:\n");
	runq_print(&tdq->tdq_idle);
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
	printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
}

static inline int
sched_shouldpreempt(int pri, int cpri, int remote)
{
	/*
	 * If the new priority is not better than the current priority there is
	 * nothing to do.
	 */
	if (pri >= cpri)
		return (0);
	/*
	 * Always preempt idle.
	 */
	if (cpri >= PRI_MIN_IDLE)
		return (1);
	/*
	 * If preemption is disabled don't preempt others.
	 */
	if (preempt_thresh == 0)
		return (0);
	/*
	 * Preempt if we exceed the threshold.
	 */
	if (pri <= preempt_thresh)
		return (1);
	/*
	 * If we're realtime or better and there is timeshare or worse running
	 * preempt only remote processors.
	 */
	if (remote && pri <= PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
		return (1);
	return (0);
}

#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
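/*
 * Worked example of the calendar-queue placement below (illustrative;
 * assumes the stock 64-priority timeshare band and RQ_NQS == 64, so
 * TS_RQ_PPQ == 1): a thread at PRI_MIN_TIMESHARE + 10 with
 * tdq_idx == 20 is filed at queue (10 + 20) % 64 == 30.  Because
 * tdq_idx rotates forward over time, better priorities keep landing
 * closer to the current removal head and are drained sooner.
 */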
/*
 * Add a thread to the actual run-queue.  Keeps transferable counts up to
 * date with what is actually on the run-queue.  Selects the correct
 * queue position for timeshare threads.
 */
static __inline void
tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
{
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
		tdq->tdq_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
	}
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		u_char pri;

		pri = ts->ts_thread->td_priority;
		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
		    ("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between MIN and MAX
		 * realtime.  Use the whole queue to represent these values.
		 */
		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
			pri = (pri + tdq->tdq_idx) % RQ_NQS;
			/*
			 * This effectively shortens the queue by one so we
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (unsigned char)(pri - 1) % RQ_NQS;
		} else
			pri = tdq->tdq_ridx;
		runq_add_pri(ts->ts_runq, ts, pri, flags);
	} else
		runq_add(ts->ts_runq, ts, flags);
}

/*
 * Remove a thread from a run-queue.  This typically happens when a thread
 * is selected to run.  Running threads are not on the queue and the
 * transferable count does not reflect them.
 */
static __inline void
tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
{
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(ts->ts_runq != NULL,
	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
	}
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
		else
			runq_remove_idx(ts->ts_runq, ts, NULL);
		/*
		 * For timeshare threads we update the priority here so
		 * the priority reflects the time we've been sleeping.
		 */
		ts->ts_ltick = ticks;
		sched_pctcpu_update(ts);
		sched_priority(ts->ts_thread);
	} else
		runq_remove(ts->ts_runq, ts);
}

/*
 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
 * for this thread to the referenced thread queue.
 */
static void
tdq_load_add(struct tdq *tdq, struct td_sched *ts)
{
	int class;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	tdq->tdq_load++;
	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload++;
}

/*
 * Remove the load from a thread that is transitioning to a sleep state or
 * exiting.
 */
static void
tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
{
	int class;

	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload--;
	KASSERT(tdq->tdq_load != 0,
	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
	tdq->tdq_load--;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
	ts->ts_runq = NULL;
}

/*
 * Set lowpri to its exact value by searching the run-queue and
 * evaluating curthread.  curthread may be passed as an optimization.
 */
static void
tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
{
	struct td_sched *ts;
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if (ctd == NULL)
		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
	ts = tdq_choose(tdq);
	if (ts)
		td = ts->ts_thread;
	if (ts == NULL || td->td_priority > ctd->td_priority)
		tdq->tdq_lowpri = ctd->td_priority;
	else
		tdq->tdq_lowpri = td->td_priority;
}

#ifdef SMP
struct cpu_search {
	cpumask_t cs_mask;	/* Mask of valid cpus. */
	u_int	cs_load;
	u_int	cs_cpu;
	int	cs_limit;	/* Min priority for low, min load for high. */
};

#define	CPU_SEARCH_LOWEST	0x1
#define	CPU_SEARCH_HIGHEST	0x2
#define	CPU_SEARCH_BOTH		(CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST)

#define	CPUMASK_FOREACH(cpu, mask)				\
	for ((cpu) = 0; (cpu) < sizeof((mask)) * 8; (cpu)++)	\
		if ((mask) & 1 << (cpu))

__inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match);
int cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low);
int cpu_search_highest(struct cpu_group *cg, struct cpu_search *high);
int cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high);
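
/*
 * Illustrative note: CPUMASK_FOREACH above simply visits the set bits
 * of a cpumask_t, so for mask == 0x5 the attached statement runs for
 * cpu 0 and cpu 2 only.
 */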
/*
 * This routine compares according to the match argument and should be
 * reduced in actual instantiations via constant propagation and dead code
 * elimination.
 */
static __inline int
cpu_compare(int cpu, struct cpu_search *low, struct cpu_search *high,
    const int match)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);
	if (match & CPU_SEARCH_LOWEST)
		if (low->cs_mask & (1 << cpu) &&
		    tdq->tdq_load < low->cs_load &&
		    tdq->tdq_lowpri > low->cs_limit) {
			low->cs_cpu = cpu;
			low->cs_load = tdq->tdq_load;
		}
	if (match & CPU_SEARCH_HIGHEST)
		if (high->cs_mask & (1 << cpu) &&
		    tdq->tdq_load >= high->cs_limit &&
		    tdq->tdq_load > high->cs_load &&
		    tdq->tdq_transferable) {
			high->cs_cpu = cpu;
			high->cs_load = tdq->tdq_load;
		}
	return (tdq->tdq_load);
}

/*
 * Search the tree of cpu_groups for the lowest or highest loaded cpu
 * according to the match argument.  This routine actually compares the
 * load on all paths through the tree and finds the least loaded cpu on
 * the least loaded path, which may differ from the least loaded cpu in
 * the system.  This balances work among caches and busses.
 *
 * This inline is instantiated in three forms below using constants for the
 * match argument.  It is reduced to the minimum set for each case.  It is
 * also recursive to the depth of the tree.
 */
static inline int
cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match)
{
	int total;

	total = 0;
	if (cg->cg_children) {
		struct cpu_search lgroup;
		struct cpu_search hgroup;
		struct cpu_group *child;
		u_int lload;
		int hload;
		int load;
		int i;

		lload = -1;
		hload = -1;
		for (i = 0; i < cg->cg_children; i++) {
			child = &cg->cg_child[i];
			if (match & CPU_SEARCH_LOWEST) {
				lgroup = *low;
				lgroup.cs_load = -1;
			}
			if (match & CPU_SEARCH_HIGHEST) {
				hgroup = *high;
				hgroup.cs_load = 0;
			}
			switch (match) {
			case CPU_SEARCH_LOWEST:
				load = cpu_search_lowest(child, &lgroup);
				break;
			case CPU_SEARCH_HIGHEST:
				load = cpu_search_highest(child, &hgroup);
				break;
			case CPU_SEARCH_BOTH:
				load = cpu_search_both(child, &lgroup, &hgroup);
				break;
			}
			total += load;
			if (match & CPU_SEARCH_LOWEST)
				if (load < lload || low->cs_cpu == -1) {
					*low = lgroup;
					lload = load;
				}
			if (match & CPU_SEARCH_HIGHEST)
				if (load > hload || high->cs_cpu == -1) {
					hload = load;
					*high = hgroup;
				}
		}
	} else {
		int cpu;

		CPUMASK_FOREACH(cpu, cg->cg_mask)
			total += cpu_compare(cpu, low, high, match);
	}
	return (total);
}
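
/*
 * Worked example (illustrative): consider two child groups, A with
 * per-cpu loads {0, 3} and B with loads {1, 1}.  A contains the single
 * least loaded cpu, but its aggregate load (3) exceeds B's (2), so a
 * CPU_SEARCH_LOWEST pass keeps B's candidate and returns a cpu with
 * load 1: the least loaded cpu on the least loaded path, exactly the
 * behavior described in the comment above.
 */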
/*
 * cpu_search instantiations must pass constants to maintain the inline
 * optimization.
 */
int
cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low)
{
	return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST);
}

int
cpu_search_highest(struct cpu_group *cg, struct cpu_search *high)
{
	return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST);
}

int
cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high)
{
	return cpu_search(cg, low, high, CPU_SEARCH_BOTH);
}

/*
 * Find the cpu with the least load via the least loaded path that has a
 * lowpri greater than pri.  A pri of -1 indicates any priority is
 * acceptable.
 */
static inline int
sched_lowest(struct cpu_group *cg, cpumask_t mask, int pri)
{
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	low.cs_limit = pri;
	cpu_search_lowest(cg, &low);
	return low.cs_cpu;
}

/*
 * Find the cpu with the highest load via the highest loaded path.
 */
static inline int
sched_highest(struct cpu_group *cg, cpumask_t mask, int minload)
{
	struct cpu_search high;

	high.cs_cpu = -1;
	high.cs_load = 0;
	high.cs_mask = mask;
	high.cs_limit = minload;
	cpu_search_highest(cg, &high);
	return high.cs_cpu;
}
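
/*
 * Usage note (illustrative): cs_limit means something different in
 * each direction.  sched_lowest(cg, mask, -1) accepts a cpu at any
 * priority while hunting for the least loaded one, whereas
 * sched_highest(cg, mask, 2) ignores cpus with fewer than two
 * runnable threads when picking a steal victim.
 */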
/*
 * Simultaneously find the highest and lowest loaded cpu reachable via
 * cg.
 */
static inline void
sched_both(struct cpu_group *cg, cpumask_t mask, int *lowcpu, int *highcpu)
{
	struct cpu_search high;
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_limit = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	high.cs_load = 0;
	high.cs_cpu = -1;
	high.cs_limit = -1;
	high.cs_mask = mask;
	cpu_search_both(cg, &low, &high);
	*lowcpu = low.cs_cpu;
	*highcpu = high.cs_cpu;
	return;
}

static void
sched_balance_group(struct cpu_group *cg)
{
	cpumask_t mask;
	int high;
	int low;
	int i;

	mask = -1;
	for (;;) {
		sched_both(cg, mask, &low, &high);
		if (low == high || low == -1 || high == -1)
			break;
		if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low)))
			break;
		/*
		 * If we failed to move any threads determine which cpu
		 * to kick out of the set and try again.
		 */
		if (TDQ_CPU(high)->tdq_transferable == 0)
			mask &= ~(1 << high);
		else
			mask &= ~(1 << low);
	}

	for (i = 0; i < cg->cg_children; i++)
		sched_balance_group(&cg->cg_child[i]);
}

static void
sched_balance()
{
	struct tdq *tdq;

	/*
	 * Select a random time between .5 * balance_interval and
	 * 1.5 * balance_interval.
	 */
	balance_ticks = max(balance_interval / 2, 1);
	balance_ticks += random() % balance_interval;
	if (smp_started == 0 || rebalance == 0)
		return;
	tdq = TDQ_SELF();
	TDQ_UNLOCK(tdq);
	sched_balance_group(cpu_top);
	TDQ_LOCK(tdq);
}
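
/*
 * Illustrative note: with the default balance_interval of 128, the
 * arithmetic above leaves balance_ticks uniformly distributed in
 * [64, 191] ticks, i.e. between .5 and 1.5 intervals, so periodic
 * rebalancing does not run in lockstep with other periodic work.
 */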
/*
 * Lock two thread queues using their address to maintain lock order.
 */
static void
tdq_lock_pair(struct tdq *one, struct tdq *two)
{
	if (one < two) {
		TDQ_LOCK(one);
		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
	} else {
		TDQ_LOCK(two);
		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
	}
}

/*
 * Unlock two thread queues.  Order is not important here.
 */
static void
tdq_unlock_pair(struct tdq *one, struct tdq *two)
{
	TDQ_UNLOCK(one);
	TDQ_UNLOCK(two);
}

/*
 * Transfer load between two imbalanced thread queues.
 */
static int
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int moved;
	int move;
	int diff;
	int i;

	tdq_lock_pair(high, low);
	transferable = high->tdq_transferable;
	high_load = high->tdq_load;
	low_load = low->tdq_load;
	moved = 0;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
	 */
	if (transferable != 0) {
		diff = high_load - low_load;
		move = diff / 2;
		if (diff & 0x1)
			move++;
		move = min(move, transferable);
		for (i = 0; i < move; i++)
			moved += tdq_move(high, low);
		/*
		 * IPI the target cpu to force it to reschedule with the new
		 * workload.
		 */
		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
	}
	tdq_unlock_pair(high, low);
	return (moved);
}

/*
 * Move a thread from one thread queue to another.
 */
static int
tdq_move(struct tdq *from, struct tdq *to)
{
	struct td_sched *ts;
	struct thread *td;
	struct tdq *tdq;
	int cpu;

	TDQ_LOCK_ASSERT(from, MA_OWNED);
	TDQ_LOCK_ASSERT(to, MA_OWNED);

	tdq = from;
	cpu = TDQ_ID(to);
	ts = tdq_steal(tdq, cpu);
	if (ts == NULL)
		return (0);
	td = ts->ts_thread;
	/*
	 * Although the run queue is locked the thread may be blocked.  Lock
	 * it to clear this and acquire the run-queue lock.
	 */
	thread_lock(td);
	/* Drop recursive lock on from acquired via thread_lock(). */
	TDQ_UNLOCK(from);
	sched_rem(td);
	ts->ts_cpu = cpu;
	td->td_lock = TDQ_LOCKPTR(to);
	tdq_add(to, td, SRQ_YIELDING);
	return (1);
}
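
/*
 * Locking note (illustrative): tdq_move() is entered with both queue
 * locks held.  For a queued thread, td_lock points at `from's lock,
 * so thread_lock() above recurses on it (the tdq locks are created
 * MTX_RECURSE) and the TDQ_UNLOCK(from) only drops that recursive
 * reference; td_lock is then re-pointed at `to's lock, which the
 * caller still holds.
 */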
/*
 * This tdq has idled.  Try to steal a thread from another cpu and switch
 * to it.
 */
static int
tdq_idled(struct tdq *tdq)
{
	struct cpu_group *cg;
	struct tdq *steal;
	cpumask_t mask;
	int thresh;
	int cpu;

	if (smp_started == 0 || steal_idle == 0)
		return (1);
	mask = -1;
	mask &= ~PCPU_GET(cpumask);
	/* We don't want to be preempted while we're iterating. */
	spinlock_enter();
	for (cg = tdq->tdq_cg; cg != NULL; ) {
		if ((cg->cg_flags & (CG_FLAG_HTT | CG_FLAG_THREAD)) == 0)
			thresh = steal_thresh;
		else
			thresh = 1;
		cpu = sched_highest(cg, mask, thresh);
		if (cpu == -1) {
			cg = cg->cg_parent;
			continue;
		}
		steal = TDQ_CPU(cpu);
		mask &= ~(1 << cpu);
		tdq_lock_pair(tdq, steal);
		if (steal->tdq_load < thresh || steal->tdq_transferable == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		/*
		 * If a thread was added while interrupts were disabled don't
		 * steal one here.  If we fail to acquire one due to affinity
		 * restrictions loop again with this cpu removed from the
		 * set.
		 */
		if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		spinlock_exit();
		TDQ_UNLOCK(steal);
		mi_switch(SW_VOL, NULL);
		thread_unlock(curthread);

		return (0);
	}
	spinlock_exit();
	return (1);
}

/*
 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
 */
static void
tdq_notify(struct tdq *tdq, struct td_sched *ts)
{
	int cpri;
	int pri;
	int cpu;

	if (tdq->tdq_ipipending)
		return;
	cpu = ts->ts_cpu;
	pri = ts->ts_thread->td_priority;
	cpri = pcpu_find(cpu)->pc_curthread->td_priority;
	if (!sched_shouldpreempt(pri, cpri, 1))
		return;
	tdq->tdq_ipipending = 1;
	ipi_selected(1 << cpu, IPI_PREEMPT);
}

/*
 * Steals load from a timeshare queue.  Honors the rotating queue head
 * index.
 */
static struct td_sched *
runq_steal_from(struct runq *rq, int cpu, u_char start)
{
	struct td_sched *ts;
	struct rqbits *rqb;
	struct rqhead *rqh;
	int first;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW - 1);
	pri = 0;
	first = 0;
again:
	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
		if (rqb->rqb_bits[i] == 0)
			continue;
		if (bit != 0) {
			for (pri = bit; pri < RQB_BPW; pri++)
				if (rqb->rqb_bits[i] & (1ul << pri))
					break;
			if (pri >= RQB_BPW)
				continue;
		} else
			pri = RQB_FFS(rqb->rqb_bits[i]);
		pri += (i << RQB_L2BPW);
		rqh = &rq->rq_queues[pri];
		TAILQ_FOREACH(ts, rqh, ts_procq) {
			if (first && THREAD_CAN_MIGRATE(ts->ts_thread) &&
			    THREAD_CAN_SCHED(ts->ts_thread, cpu))
				return (ts);
			first = 1;
		}
	}
	if (start != 0) {
		start = 0;
		goto again;
	}

	return (NULL);
}

/*
 * Steals load from a standard linear queue.
 */
static struct td_sched *
runq_steal(struct runq *rq, int cpu)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct td_sched *ts;
	int word;
	int bit;

	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ts, rqh, ts_procq)
				if (THREAD_CAN_MIGRATE(ts->ts_thread) &&
				    THREAD_CAN_SCHED(ts->ts_thread, cpu))
					return (ts);
		}
	}
	return (NULL);
}

/*
 * Attempt to steal a thread in priority order from a thread queue.
 */
static struct td_sched *
tdq_steal(struct tdq *tdq, int cpu)
{
	struct td_sched *ts;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if ((ts = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
		return (ts);
	if ((ts = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx))
	    != NULL)
		return (ts);
	return (runq_steal(&tdq->tdq_idle, cpu));
}

/*
 * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
 * current lock and returns with the assigned queue locked.
 */
static inline struct tdq *
sched_setcpu(struct td_sched *ts, int cpu, int flags)
{
	struct thread *td;
	struct tdq *tdq;

	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);

	tdq = TDQ_CPU(cpu);
	td = ts->ts_thread;
	ts->ts_cpu = cpu;

	/* If the lock matches just return the queue. */
	if (td->td_lock == TDQ_LOCKPTR(tdq))
		return (tdq);
#ifdef notyet
	/*
	 * If the thread isn't running its lockptr is a
	 * turnstile or a sleepqueue.  We can just lock_set without
	 * blocking.
	 */
	if (TD_CAN_RUN(td)) {
		TDQ_LOCK(tdq);
		thread_lock_set(td, TDQ_LOCKPTR(tdq));
		return (tdq);
	}
#endif
	/*
	 * The hard case, migration, requires blocking the thread first to
	 * prevent order reversals with other cpus' locks.
	 */
	thread_lock_block(td);
	TDQ_LOCK(tdq);
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
	return (tdq);
}

static int
sched_pickcpu(struct td_sched *ts, int flags)
{
	struct cpu_group *cg;
	struct thread *td;
	struct tdq *tdq;
	cpumask_t mask;
	int self;
	int pri;
	int cpu;

	self = PCPU_GET(cpuid);
	td = ts->ts_thread;
	if (smp_started == 0)
		return (self);
	/*
	 * Don't migrate a running thread from sched_switch().
	 */
	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
		return (ts->ts_cpu);
	/*
	 * Prefer to run interrupt threads on the processors that generate
	 * the interrupt.
	 */
	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
	    curthread->td_intr_nesting_level)
		ts->ts_cpu = self;
	/*
	 * If the thread can run on the last cpu and the affinity has not
	 * expired or it is idle run it there.
	 */
	pri = td->td_priority;
	tdq = TDQ_CPU(ts->ts_cpu);
	if (THREAD_CAN_SCHED(td, ts->ts_cpu)) {
		if (tdq->tdq_lowpri > PRI_MIN_IDLE)
			return (ts->ts_cpu);
		if (SCHED_AFFINITY(ts, CG_SHARE_L2) && tdq->tdq_lowpri > pri)
			return (ts->ts_cpu);
	}
	/*
	 * Search for the highest level in the tree that still has affinity.
11217b8bfa0dSJeff Roberson */ 112262fa74d9SJeff Roberson cg = NULL; 112362fa74d9SJeff Roberson for (cg = tdq->tdq_cg; cg != NULL; cg = cg->cg_parent) 112462fa74d9SJeff Roberson if (SCHED_AFFINITY(ts, cg->cg_level)) 112562fa74d9SJeff Roberson break; 112662fa74d9SJeff Roberson cpu = -1; 112762fa74d9SJeff Roberson mask = td->td_cpuset->cs_mask.__bits[0]; 112862fa74d9SJeff Roberson if (cg) 112962fa74d9SJeff Roberson cpu = sched_lowest(cg, mask, pri); 113062fa74d9SJeff Roberson if (cpu == -1) 113162fa74d9SJeff Roberson cpu = sched_lowest(cpu_top, mask, -1); 113262fa74d9SJeff Roberson /* 113362fa74d9SJeff Roberson * Compare the lowest loaded cpu to current cpu. 113462fa74d9SJeff Roberson */ 1135ff256d9cSJeff Roberson if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri && 1136ff256d9cSJeff Roberson TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE) 113762fa74d9SJeff Roberson cpu = self; 1138ff256d9cSJeff Roberson KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu.")); 1139ae7a6b38SJeff Roberson return (cpu); 114080f86c9fSJeff Roberson } 114162fa74d9SJeff Roberson #endif 114222bf7d9aSJeff Roberson 114322bf7d9aSJeff Roberson /* 114422bf7d9aSJeff Roberson * Pick the highest priority task we have and return it. 11450c0a98b2SJeff Roberson */ 1146ad1e7d28SJulian Elischer static struct td_sched * 1147ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq) 11485d7ef00cSJeff Roberson { 1149ad1e7d28SJulian Elischer struct td_sched *ts; 11505d7ef00cSJeff Roberson 1151ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1152e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_realtime); 1153dda713dfSJeff Roberson if (ts != NULL) 1154e7d50326SJeff Roberson return (ts); 11553f872f85SJeff Roberson ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 1156e7d50326SJeff Roberson if (ts != NULL) { 1157dda713dfSJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE, 1158e7d50326SJeff Roberson ("tdq_choose: Invalid priority on timeshare queue %d", 1159e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1160ad1e7d28SJulian Elischer return (ts); 116115dc847eSJeff Roberson } 116215dc847eSJeff Roberson 1163e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_idle); 1164e7d50326SJeff Roberson if (ts != NULL) { 1165e7d50326SJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE, 1166e7d50326SJeff Roberson ("tdq_choose: Invalid priority on idle queue %d", 1167e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1168e7d50326SJeff Roberson return (ts); 1169e7d50326SJeff Roberson } 1170e7d50326SJeff Roberson 1171e7d50326SJeff Roberson return (NULL); 1172245f3abfSJeff Roberson } 11730a016a05SJeff Roberson 1174ae7a6b38SJeff Roberson /* 1175ae7a6b38SJeff Roberson * Initialize a thread queue. 
1176ae7a6b38SJeff Roberson */
11770a016a05SJeff Roberson static void
1178ad1e7d28SJulian Elischer tdq_setup(struct tdq *tdq)
11790a016a05SJeff Roberson {
1180ae7a6b38SJeff Roberson 
1181c47f202bSJeff Roberson if (bootverbose)
1182c47f202bSJeff Roberson printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1183e7d50326SJeff Roberson runq_init(&tdq->tdq_realtime);
1184e7d50326SJeff Roberson runq_init(&tdq->tdq_timeshare);
1185d2ad694cSJeff Roberson runq_init(&tdq->tdq_idle);
118662fa74d9SJeff Roberson snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
118762fa74d9SJeff Roberson "sched lock %d", (int)TDQ_ID(tdq));
118862fa74d9SJeff Roberson mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
118962fa74d9SJeff Roberson MTX_SPIN | MTX_RECURSE);
11900a016a05SJeff Roberson }
11910a016a05SJeff Roberson 
1192c47f202bSJeff Roberson #ifdef SMP
1193c47f202bSJeff Roberson static void
1194c47f202bSJeff Roberson sched_setup_smp(void)
1195c47f202bSJeff Roberson {
1196c47f202bSJeff Roberson struct tdq *tdq;
1197c47f202bSJeff Roberson int i;
1198c47f202bSJeff Roberson 
119962fa74d9SJeff Roberson cpu_top = smp_topo();
120062fa74d9SJeff Roberson for (i = 0; i < MAXCPU; i++) {
1201c47f202bSJeff Roberson if (CPU_ABSENT(i))
1202c47f202bSJeff Roberson continue;
120362fa74d9SJeff Roberson tdq = TDQ_CPU(i);
1204c47f202bSJeff Roberson tdq_setup(tdq);
120562fa74d9SJeff Roberson tdq->tdq_cg = smp_topo_find(cpu_top, i);
120662fa74d9SJeff Roberson if (tdq->tdq_cg == NULL)
120762fa74d9SJeff Roberson panic("Can't find cpu group for %d\n", i);
1208c47f202bSJeff Roberson }
120962fa74d9SJeff Roberson balance_tdq = TDQ_SELF();
121062fa74d9SJeff Roberson sched_balance();
1211c47f202bSJeff Roberson }
1212c47f202bSJeff Roberson #endif
1213c47f202bSJeff Roberson 
1214ae7a6b38SJeff Roberson /*
1215ae7a6b38SJeff Roberson * Set up the thread queues and initialize the topology based on MD
1216ae7a6b38SJeff Roberson * information.
1217ae7a6b38SJeff Roberson */
121835e6168fSJeff Roberson static void
121935e6168fSJeff Roberson sched_setup(void *dummy)
122035e6168fSJeff Roberson {
1221ae7a6b38SJeff Roberson struct tdq *tdq;
1222c47f202bSJeff Roberson 
1223c47f202bSJeff Roberson tdq = TDQ_SELF();
12240ec896fdSJeff Roberson #ifdef SMP
1225c47f202bSJeff Roberson sched_setup_smp();
1226749d01b0SJeff Roberson #else
1227c47f202bSJeff Roberson tdq_setup(tdq);
1228356500a3SJeff Roberson #endif
1229ae7a6b38SJeff Roberson /*
1230ae7a6b38SJeff Roberson * To avoid divide-by-zero, we set realstathz to a dummy value
1231ae7a6b38SJeff Roberson * in case sched_clock() is called before sched_initticks().
1232ae7a6b38SJeff Roberson */
1233ae7a6b38SJeff Roberson realstathz = hz;
1234ae7a6b38SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */
1235ae7a6b38SJeff Roberson tickincr = 1 << SCHED_TICK_SHIFT;
1236ae7a6b38SJeff Roberson 
1237ae7a6b38SJeff Roberson /* Add thread0's load since it's running. */
1238ae7a6b38SJeff Roberson TDQ_LOCK(tdq);
1239c47f202bSJeff Roberson thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1240ae7a6b38SJeff Roberson tdq_load_add(tdq, &td_sched0);
124162fa74d9SJeff Roberson tdq->tdq_lowpri = thread0.td_priority;
1242ae7a6b38SJeff Roberson TDQ_UNLOCK(tdq);
124335e6168fSJeff Roberson }
124435e6168fSJeff Roberson 
1245ae7a6b38SJeff Roberson /*
1246ae7a6b38SJeff Roberson * This routine determines the tickincr after stathz and hz are set up.
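 *
 * Worked example (illustrative values, not requirements): with hz = 1000,
 * stathz = 128 and SCHED_TICK_SHIFT = 10, incr = (1000 << 10) / 128 = 8000,
 * so each stathz tick credits the running thread with 8000 fixed-point
 * units, i.e. roughly 7.8 hz ticks worth of run time.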
1247ae7a6b38SJeff Roberson */
1248a1d4fe69SDavid Xu /* ARGSUSED */
1249a1d4fe69SDavid Xu static void
1250a1d4fe69SDavid Xu sched_initticks(void *dummy)
1251a1d4fe69SDavid Xu {
1252ae7a6b38SJeff Roberson int incr;
1253ae7a6b38SJeff Roberson 
1254a1d4fe69SDavid Xu realstathz = stathz ? stathz : hz;
125514618990SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */
1256a1d4fe69SDavid Xu 
1257a1d4fe69SDavid Xu /*
1258e7d50326SJeff Roberson * tickincr is shifted out by 10 to avoid rounding errors due to
12593f872f85SJeff Roberson * hz not being evenly divisible by stathz on all platforms.
1260e7d50326SJeff Roberson */
1261ae7a6b38SJeff Roberson incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1262e7d50326SJeff Roberson /*
1263e7d50326SJeff Roberson * This does not work for values of stathz that are more than
1264e7d50326SJeff Roberson * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen.
1265a1d4fe69SDavid Xu */
1266ae7a6b38SJeff Roberson if (incr == 0)
1267ae7a6b38SJeff Roberson incr = 1;
1268ae7a6b38SJeff Roberson tickincr = incr;
12697b8bfa0dSJeff Roberson #ifdef SMP
12709862717aSJeff Roberson /*
12717fcf154aSJeff Roberson * Set the default balance interval now that we know
12727fcf154aSJeff Roberson * what realstathz is.
12737fcf154aSJeff Roberson */
12747fcf154aSJeff Roberson balance_interval = realstathz;
12757fcf154aSJeff Roberson /*
12769862717aSJeff Roberson * Set steal thresh to log2(mp_ncpus) but no greater than 3. This
12779862717aSJeff Roberson * prevents excess thrashing on large machines and excess idle on
12789862717aSJeff Roberson * smaller machines.
12799862717aSJeff Roberson */
128062fa74d9SJeff Roberson steal_thresh = min(ffs(mp_ncpus) - 1, 3);
12817b8bfa0dSJeff Roberson affinity = SCHED_AFFINITY_DEFAULT;
12827b8bfa0dSJeff Roberson #endif
1283a1d4fe69SDavid Xu }
1284a1d4fe69SDavid Xu 
1285a1d4fe69SDavid Xu 
128635e6168fSJeff Roberson /*
1287ae7a6b38SJeff Roberson * This is the core of the interactivity algorithm. Determines a score based
1288ae7a6b38SJeff Roberson * on past behavior. It is the ratio of sleep time to run time scaled to
1289ae7a6b38SJeff Roberson * a [0, 100] integer. This is the voluntary sleep time of a process, which
1290ae7a6b38SJeff Roberson * differs from the cpu usage because it does not account for time spent
1291ae7a6b38SJeff Roberson * waiting on a run-queue. Would be prettier if we had floating point.
1292ae7a6b38SJeff Roberson */
1293ae7a6b38SJeff Roberson static int
1294ae7a6b38SJeff Roberson sched_interact_score(struct thread *td)
1295ae7a6b38SJeff Roberson {
1296ae7a6b38SJeff Roberson struct td_sched *ts;
1297ae7a6b38SJeff Roberson int div;
1298ae7a6b38SJeff Roberson 
1299ae7a6b38SJeff Roberson ts = td->td_sched;
1300ae7a6b38SJeff Roberson /*
1301ae7a6b38SJeff Roberson * The score is only needed if this is likely to be an interactive
1302ae7a6b38SJeff Roberson * task. Don't go through the expense of computing it if there's
1303ae7a6b38SJeff Roberson * no chance.
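 *
 * Reasoning (editorial note): the early return below fires only when the
 * sched_interact threshold is at most SCHED_INTERACT_HALF; a thread that
 * has run at least as long as it has slept always scores
 * SCHED_INTERACT_HALF or higher, so it can never fall under the
 * threshold and the full computation can be skipped.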
1304ae7a6b38SJeff Roberson */ 1305ae7a6b38SJeff Roberson if (sched_interact <= SCHED_INTERACT_HALF && 1306ae7a6b38SJeff Roberson ts->ts_runtime >= ts->ts_slptime) 1307ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1308ae7a6b38SJeff Roberson 1309ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1310ae7a6b38SJeff Roberson div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF); 1311ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF + 1312ae7a6b38SJeff Roberson (SCHED_INTERACT_HALF - (ts->ts_slptime / div))); 1313ae7a6b38SJeff Roberson } 1314ae7a6b38SJeff Roberson if (ts->ts_slptime > ts->ts_runtime) { 1315ae7a6b38SJeff Roberson div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF); 1316ae7a6b38SJeff Roberson return (ts->ts_runtime / div); 1317ae7a6b38SJeff Roberson } 1318ae7a6b38SJeff Roberson /* runtime == slptime */ 1319ae7a6b38SJeff Roberson if (ts->ts_runtime) 1320ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1321ae7a6b38SJeff Roberson 1322ae7a6b38SJeff Roberson /* 1323ae7a6b38SJeff Roberson * This can happen if slptime and runtime are 0. 1324ae7a6b38SJeff Roberson */ 1325ae7a6b38SJeff Roberson return (0); 1326ae7a6b38SJeff Roberson 1327ae7a6b38SJeff Roberson } 1328ae7a6b38SJeff Roberson 1329ae7a6b38SJeff Roberson /* 133035e6168fSJeff Roberson * Scale the scheduling priority according to the "interactivity" of this 133135e6168fSJeff Roberson * process. 133235e6168fSJeff Roberson */ 133315dc847eSJeff Roberson static void 13348460a577SJohn Birrell sched_priority(struct thread *td) 133535e6168fSJeff Roberson { 1336e7d50326SJeff Roberson int score; 133735e6168fSJeff Roberson int pri; 133835e6168fSJeff Roberson 13398460a577SJohn Birrell if (td->td_pri_class != PRI_TIMESHARE) 134015dc847eSJeff Roberson return; 1341e7d50326SJeff Roberson /* 1342e7d50326SJeff Roberson * If the score is interactive we place the thread in the realtime 1343e7d50326SJeff Roberson * queue with a priority that is less than kernel and interrupt 1344e7d50326SJeff Roberson * priorities. These threads are not subject to nice restrictions. 1345e7d50326SJeff Roberson * 1346ae7a6b38SJeff Roberson * Scores greater than this are placed on the normal timeshare queue 1347e7d50326SJeff Roberson * where the priority is partially decided by the most recent cpu 1348e7d50326SJeff Roberson * utilization and the rest is decided by nice value. 1349a5423ea3SJeff Roberson * 1350a5423ea3SJeff Roberson * The nice value of the process has a linear effect on the calculated 1351a5423ea3SJeff Roberson * score. Negative nice values make it easier for a thread to be 1352a5423ea3SJeff Roberson * considered interactive. 
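 *
 * Worked sketch with the symbolic constants below: a thread whose score
 * is half of sched_interact maps to roughly the middle of the realtime
 * range, pri = PRI_MIN_REALTIME + ((PRI_MAX_REALTIME - PRI_MIN_REALTIME)
 * / sched_interact) * (sched_interact / 2), while a batch thread starts
 * at SCHED_PRI_MIN and is pushed down by SCHED_PRI_TICKS() (recent cpu
 * use) plus SCHED_PRI_NICE() (nice value).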
1353e7d50326SJeff Roberson */ 1354e270652bSJeff Roberson score = imax(0, sched_interact_score(td) - td->td_proc->p_nice); 1355e7d50326SJeff Roberson if (score < sched_interact) { 1356e7d50326SJeff Roberson pri = PRI_MIN_REALTIME; 1357e7d50326SJeff Roberson pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact) 1358e7d50326SJeff Roberson * score; 1359e7d50326SJeff Roberson KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME, 13609a93305aSJeff Roberson ("sched_priority: invalid interactive priority %d score %d", 13619a93305aSJeff Roberson pri, score)); 1362e7d50326SJeff Roberson } else { 1363e7d50326SJeff Roberson pri = SCHED_PRI_MIN; 1364e7d50326SJeff Roberson if (td->td_sched->ts_ticks) 1365e7d50326SJeff Roberson pri += SCHED_PRI_TICKS(td->td_sched); 1366e7d50326SJeff Roberson pri += SCHED_PRI_NICE(td->td_proc->p_nice); 1367ae7a6b38SJeff Roberson KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE, 1368ae7a6b38SJeff Roberson ("sched_priority: invalid priority %d: nice %d, " 1369ae7a6b38SJeff Roberson "ticks %d ftick %d ltick %d tick pri %d", 1370ae7a6b38SJeff Roberson pri, td->td_proc->p_nice, td->td_sched->ts_ticks, 1371ae7a6b38SJeff Roberson td->td_sched->ts_ftick, td->td_sched->ts_ltick, 1372ae7a6b38SJeff Roberson SCHED_PRI_TICKS(td->td_sched))); 1373e7d50326SJeff Roberson } 13748460a577SJohn Birrell sched_user_prio(td, pri); 137535e6168fSJeff Roberson 137615dc847eSJeff Roberson return; 137735e6168fSJeff Roberson } 137835e6168fSJeff Roberson 137935e6168fSJeff Roberson /* 1380d322132cSJeff Roberson * This routine enforces a maximum limit on the amount of scheduling history 1381ae7a6b38SJeff Roberson * kept. It is called after either the slptime or runtime is adjusted. This 1382ae7a6b38SJeff Roberson * function is ugly due to integer math. 1383d322132cSJeff Roberson */ 13844b60e324SJeff Roberson static void 13858460a577SJohn Birrell sched_interact_update(struct thread *td) 13864b60e324SJeff Roberson { 1387155b6ca1SJeff Roberson struct td_sched *ts; 13889a93305aSJeff Roberson u_int sum; 13893f741ca1SJeff Roberson 1390155b6ca1SJeff Roberson ts = td->td_sched; 1391ae7a6b38SJeff Roberson sum = ts->ts_runtime + ts->ts_slptime; 1392d322132cSJeff Roberson if (sum < SCHED_SLP_RUN_MAX) 1393d322132cSJeff Roberson return; 1394d322132cSJeff Roberson /* 1395155b6ca1SJeff Roberson * This only happens from two places: 1396155b6ca1SJeff Roberson * 1) We have added an unusual amount of run time from fork_exit. 1397155b6ca1SJeff Roberson * 2) We have added an unusual amount of sleep time from sched_sleep(). 1398155b6ca1SJeff Roberson */ 1399155b6ca1SJeff Roberson if (sum > SCHED_SLP_RUN_MAX * 2) { 1400ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1401ae7a6b38SJeff Roberson ts->ts_runtime = SCHED_SLP_RUN_MAX; 1402ae7a6b38SJeff Roberson ts->ts_slptime = 1; 1403155b6ca1SJeff Roberson } else { 1404ae7a6b38SJeff Roberson ts->ts_slptime = SCHED_SLP_RUN_MAX; 1405ae7a6b38SJeff Roberson ts->ts_runtime = 1; 1406155b6ca1SJeff Roberson } 1407155b6ca1SJeff Roberson return; 1408155b6ca1SJeff Roberson } 1409155b6ca1SJeff Roberson /* 1410d322132cSJeff Roberson * If we have exceeded by more than 1/5th then the algorithm below 1411d322132cSJeff Roberson * will not bring us back into range. 
Dividing by two here forces
14122454aaf5SJeff Roberson * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1413d322132cSJeff Roberson */
141437a35e4aSJeff Roberson if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1415ae7a6b38SJeff Roberson ts->ts_runtime /= 2;
1416ae7a6b38SJeff Roberson ts->ts_slptime /= 2;
1417d322132cSJeff Roberson return;
1418d322132cSJeff Roberson }
1419ae7a6b38SJeff Roberson ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1420ae7a6b38SJeff Roberson ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1421d322132cSJeff Roberson }
1422d322132cSJeff Roberson 
1423ae7a6b38SJeff Roberson /*
1424ae7a6b38SJeff Roberson * Scale back the interactivity history when a child thread is created. The
1425ae7a6b38SJeff Roberson * history is inherited from the parent but the thread may behave totally
1426ae7a6b38SJeff Roberson * differently. For example, a shell spawning a compiler process. We want
1427ae7a6b38SJeff Roberson * to learn that the compiler is behaving badly very quickly.
1428ae7a6b38SJeff Roberson */
1429d322132cSJeff Roberson static void
14308460a577SJohn Birrell sched_interact_fork(struct thread *td)
1431d322132cSJeff Roberson {
1432d322132cSJeff Roberson int ratio;
1433d322132cSJeff Roberson int sum;
1434d322132cSJeff Roberson 
1435ae7a6b38SJeff Roberson sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1436d322132cSJeff Roberson if (sum > SCHED_SLP_RUN_FORK) {
1437d322132cSJeff Roberson ratio = sum / SCHED_SLP_RUN_FORK;
1438ae7a6b38SJeff Roberson td->td_sched->ts_runtime /= ratio;
1439ae7a6b38SJeff Roberson td->td_sched->ts_slptime /= ratio;
14404b60e324SJeff Roberson }
14414b60e324SJeff Roberson }
14424b60e324SJeff Roberson 
144315dc847eSJeff Roberson /*
1444ae7a6b38SJeff Roberson * Called from proc0_init() to set up the scheduler fields.
1445ed062c8dSJulian Elischer */
1446ed062c8dSJulian Elischer void
1447ed062c8dSJulian Elischer schedinit(void)
1448ed062c8dSJulian Elischer {
1449e7d50326SJeff Roberson 
1450ed062c8dSJulian Elischer /*
1451ed062c8dSJulian Elischer * Set up the scheduler specific parts of proc0.
1452ed062c8dSJulian Elischer */
1453ed062c8dSJulian Elischer proc0.p_sched = NULL; /* XXX */
1454ad1e7d28SJulian Elischer thread0.td_sched = &td_sched0;
1455e7d50326SJeff Roberson td_sched0.ts_ltick = ticks;
14568ab80cf0SJeff Roberson td_sched0.ts_ftick = ticks;
1457ad1e7d28SJulian Elischer td_sched0.ts_thread = &thread0;
1458ed062c8dSJulian Elischer }
1459ed062c8dSJulian Elischer 
1460ed062c8dSJulian Elischer /*
146115dc847eSJeff Roberson * This is only somewhat accurate since given many processes of the same
146215dc847eSJeff Roberson * priority they will switch when their slices run out, which will be
1463e7d50326SJeff Roberson * at most sched_slice stathz ticks.
146415dc847eSJeff Roberson */
146535e6168fSJeff Roberson int
146635e6168fSJeff Roberson sched_rr_interval(void)
146735e6168fSJeff Roberson {
1468e7d50326SJeff Roberson 
1469e7d50326SJeff Roberson /* Convert sched_slice to hz */
1470e7d50326SJeff Roberson return (hz/(realstathz/sched_slice));
147135e6168fSJeff Roberson }
147235e6168fSJeff Roberson 
1473ae7a6b38SJeff Roberson /*
1474ae7a6b38SJeff Roberson * Update the percent cpu tracking information when it is requested or
1475ae7a6b38SJeff Roberson * the total history exceeds the maximum. We keep a sliding history of
1476ae7a6b38SJeff Roberson * tick counts that slowly decays. This is less precise than the 4BSD
1477ae7a6b38SJeff Roberson * mechanism since it happens with less regular and frequent events.
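 *
 * Sketch of the decay (editorial note): a thread seen recently keeps its
 * tick count, rescaled below so its history spans the target window
 * again, while a thread off cpu for longer than the window has ts_ticks
 * reset to 0 entirely.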
1478ae7a6b38SJeff Roberson */ 147922bf7d9aSJeff Roberson static void 1480ad1e7d28SJulian Elischer sched_pctcpu_update(struct td_sched *ts) 148135e6168fSJeff Roberson { 1482e7d50326SJeff Roberson 1483e7d50326SJeff Roberson if (ts->ts_ticks == 0) 1484e7d50326SJeff Roberson return; 14858ab80cf0SJeff Roberson if (ticks - (hz / 10) < ts->ts_ltick && 14868ab80cf0SJeff Roberson SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 14878ab80cf0SJeff Roberson return; 148835e6168fSJeff Roberson /* 148935e6168fSJeff Roberson * Adjust counters and watermark for pctcpu calc. 1490210491d3SJeff Roberson */ 1491e7d50326SJeff Roberson if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1492ad1e7d28SJulian Elischer ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1493e7d50326SJeff Roberson SCHED_TICK_TARG; 1494e7d50326SJeff Roberson else 1495ad1e7d28SJulian Elischer ts->ts_ticks = 0; 1496ad1e7d28SJulian Elischer ts->ts_ltick = ticks; 1497e7d50326SJeff Roberson ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 149835e6168fSJeff Roberson } 149935e6168fSJeff Roberson 1500ae7a6b38SJeff Roberson /* 1501ae7a6b38SJeff Roberson * Adjust the priority of a thread. Move it to the appropriate run-queue 1502ae7a6b38SJeff Roberson * if necessary. This is the back-end for several priority related 1503ae7a6b38SJeff Roberson * functions. 1504ae7a6b38SJeff Roberson */ 1505e7d50326SJeff Roberson static void 1506f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio) 150735e6168fSJeff Roberson { 1508ad1e7d28SJulian Elischer struct td_sched *ts; 150935e6168fSJeff Roberson 151081d47d3fSJeff Roberson CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1511431f8906SJulian Elischer td, td->td_name, td->td_priority, prio, curthread, 1512431f8906SJulian Elischer curthread->td_name); 1513ad1e7d28SJulian Elischer ts = td->td_sched; 15147b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1515f5c157d9SJohn Baldwin if (td->td_priority == prio) 1516f5c157d9SJohn Baldwin return; 1517e7d50326SJeff Roberson 15183f872f85SJeff Roberson if (TD_ON_RUNQ(td) && prio < td->td_priority) { 15193f741ca1SJeff Roberson /* 15203f741ca1SJeff Roberson * If the priority has been elevated due to priority 15213f741ca1SJeff Roberson * propagation, we may have to move ourselves to a new 1522e7d50326SJeff Roberson * queue. This could be optimized to not re-add in some 1523e7d50326SJeff Roberson * cases. 1524f2b74cbfSJeff Roberson */ 1525e7d50326SJeff Roberson sched_rem(td); 1526e7d50326SJeff Roberson td->td_priority = prio; 1527ae7a6b38SJeff Roberson sched_add(td, SRQ_BORROWING); 1528317da705SJeff Roberson } else if (TD_IS_RUNNING(td)) { 1529ae7a6b38SJeff Roberson struct tdq *tdq; 153062fa74d9SJeff Roberson int oldpri; 1531ae7a6b38SJeff Roberson 1532ae7a6b38SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 153362fa74d9SJeff Roberson oldpri = td->td_priority; 15343f741ca1SJeff Roberson td->td_priority = prio; 153562fa74d9SJeff Roberson if (prio < tdq->tdq_lowpri) 153662fa74d9SJeff Roberson tdq->tdq_lowpri = prio; 153762fa74d9SJeff Roberson else if (tdq->tdq_lowpri == oldpri) 153862fa74d9SJeff Roberson tdq_setlowpri(tdq, td); 1539317da705SJeff Roberson } else 1540317da705SJeff Roberson td->td_priority = prio; 1541ae7a6b38SJeff Roberson } 154235e6168fSJeff Roberson 1543f5c157d9SJohn Baldwin /* 1544f5c157d9SJohn Baldwin * Update a thread's priority when it is lent another thread's 1545f5c157d9SJohn Baldwin * priority. 
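 *
 * The typical caller is the turnstile code during priority propagation:
 * a lock holder is lent the priority of its highest priority waiter, and
 * TDF_BORROWING records that sched_prio() must not lower the priority
 * again until the loan is returned via sched_unlend_prio().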
1546f5c157d9SJohn Baldwin */
1547f5c157d9SJohn Baldwin void
1548f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio)
1549f5c157d9SJohn Baldwin {
1550f5c157d9SJohn Baldwin 
1551f5c157d9SJohn Baldwin td->td_flags |= TDF_BORROWING;
1552f5c157d9SJohn Baldwin sched_thread_priority(td, prio);
1553f5c157d9SJohn Baldwin }
1554f5c157d9SJohn Baldwin 
1555f5c157d9SJohn Baldwin /*
1556f5c157d9SJohn Baldwin * Restore a thread's priority when priority propagation is
1557f5c157d9SJohn Baldwin * over. The prio argument is the minimum priority the thread
1558f5c157d9SJohn Baldwin * needs to have to satisfy other possible priority lending
1559f5c157d9SJohn Baldwin * requests. If the thread's regular priority is less
1560f5c157d9SJohn Baldwin * important than prio, the thread will keep a priority boost
1561f5c157d9SJohn Baldwin * of prio.
1562f5c157d9SJohn Baldwin */
1563f5c157d9SJohn Baldwin void
1564f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio)
1565f5c157d9SJohn Baldwin {
1566f5c157d9SJohn Baldwin u_char base_pri;
1567f5c157d9SJohn Baldwin 
1568f5c157d9SJohn Baldwin if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1569f5c157d9SJohn Baldwin td->td_base_pri <= PRI_MAX_TIMESHARE)
15708460a577SJohn Birrell base_pri = td->td_user_pri;
1571f5c157d9SJohn Baldwin else
1572f5c157d9SJohn Baldwin base_pri = td->td_base_pri;
1573f5c157d9SJohn Baldwin if (prio >= base_pri) {
1574f5c157d9SJohn Baldwin td->td_flags &= ~TDF_BORROWING;
1575f5c157d9SJohn Baldwin sched_thread_priority(td, base_pri);
1576f5c157d9SJohn Baldwin } else
1577f5c157d9SJohn Baldwin sched_lend_prio(td, prio);
1578f5c157d9SJohn Baldwin }
1579f5c157d9SJohn Baldwin 
1580ae7a6b38SJeff Roberson /*
1581ae7a6b38SJeff Roberson * Standard entry for setting the priority to an absolute value.
1582ae7a6b38SJeff Roberson */
1583f5c157d9SJohn Baldwin void
1584f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio)
1585f5c157d9SJohn Baldwin {
1586f5c157d9SJohn Baldwin u_char oldprio;
1587f5c157d9SJohn Baldwin 
1588f5c157d9SJohn Baldwin /* First, update the base priority. */
1589f5c157d9SJohn Baldwin td->td_base_pri = prio;
1590f5c157d9SJohn Baldwin 
1591f5c157d9SJohn Baldwin /*
159250aaa791SJohn Baldwin * If the thread is borrowing another thread's priority, don't
1593f5c157d9SJohn Baldwin * ever lower the priority.
1594f5c157d9SJohn Baldwin */
1595f5c157d9SJohn Baldwin if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1596f5c157d9SJohn Baldwin return;
1597f5c157d9SJohn Baldwin 
1598f5c157d9SJohn Baldwin /* Change the real priority. */
1599f5c157d9SJohn Baldwin oldprio = td->td_priority;
1600f5c157d9SJohn Baldwin sched_thread_priority(td, prio);
1601f5c157d9SJohn Baldwin 
1602f5c157d9SJohn Baldwin /*
1603f5c157d9SJohn Baldwin * If the thread is on a turnstile, then let the turnstile update
1604f5c157d9SJohn Baldwin * its state.
1605f5c157d9SJohn Baldwin */
1606f5c157d9SJohn Baldwin if (TD_ON_LOCK(td) && oldprio != prio)
1607f5c157d9SJohn Baldwin turnstile_adjust(td, oldprio);
1608f5c157d9SJohn Baldwin }
1609f5c157d9SJohn Baldwin 
1610ae7a6b38SJeff Roberson /*
1611ae7a6b38SJeff Roberson * Set the base user priority, does not affect current running priority.
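 * The effective priority catches up later: sched_userret() copies
 * td_user_pri back into td_priority when the thread next returns to
 * user mode.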
1612ae7a6b38SJeff Roberson */ 161335e6168fSJeff Roberson void 16148460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio) 16153db720fdSDavid Xu { 16163db720fdSDavid Xu u_char oldprio; 16173db720fdSDavid Xu 16188460a577SJohn Birrell td->td_base_user_pri = prio; 1619fc6c30f6SJulian Elischer if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1620fc6c30f6SJulian Elischer return; 16218460a577SJohn Birrell oldprio = td->td_user_pri; 16228460a577SJohn Birrell td->td_user_pri = prio; 16233db720fdSDavid Xu } 16243db720fdSDavid Xu 16253db720fdSDavid Xu void 16263db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio) 16273db720fdSDavid Xu { 16283db720fdSDavid Xu u_char oldprio; 16293db720fdSDavid Xu 1630435806d3SDavid Xu THREAD_LOCK_ASSERT(td, MA_OWNED); 16313db720fdSDavid Xu td->td_flags |= TDF_UBORROWING; 1632f645b5daSMaxim Konovalov oldprio = td->td_user_pri; 16338460a577SJohn Birrell td->td_user_pri = prio; 16343db720fdSDavid Xu } 16353db720fdSDavid Xu 16363db720fdSDavid Xu void 16373db720fdSDavid Xu sched_unlend_user_prio(struct thread *td, u_char prio) 16383db720fdSDavid Xu { 16393db720fdSDavid Xu u_char base_pri; 16403db720fdSDavid Xu 1641435806d3SDavid Xu THREAD_LOCK_ASSERT(td, MA_OWNED); 16428460a577SJohn Birrell base_pri = td->td_base_user_pri; 16433db720fdSDavid Xu if (prio >= base_pri) { 16443db720fdSDavid Xu td->td_flags &= ~TDF_UBORROWING; 16458460a577SJohn Birrell sched_user_prio(td, base_pri); 1646435806d3SDavid Xu } else { 16473db720fdSDavid Xu sched_lend_user_prio(td, prio); 16483db720fdSDavid Xu } 1649435806d3SDavid Xu } 16503db720fdSDavid Xu 1651ae7a6b38SJeff Roberson /* 165208c9a16cSJeff Roberson * Add the thread passed as 'newtd' to the run queue before selecting 165308c9a16cSJeff Roberson * the next thread to run. This is only used for KSE. 165408c9a16cSJeff Roberson */ 165508c9a16cSJeff Roberson static void 165608c9a16cSJeff Roberson sched_switchin(struct tdq *tdq, struct thread *td) 165708c9a16cSJeff Roberson { 165808c9a16cSJeff Roberson #ifdef SMP 165908c9a16cSJeff Roberson spinlock_enter(); 166008c9a16cSJeff Roberson TDQ_UNLOCK(tdq); 166108c9a16cSJeff Roberson thread_lock(td); 166208c9a16cSJeff Roberson spinlock_exit(); 166308c9a16cSJeff Roberson sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING); 166408c9a16cSJeff Roberson #else 166508c9a16cSJeff Roberson td->td_lock = TDQ_LOCKPTR(tdq); 166608c9a16cSJeff Roberson #endif 166708c9a16cSJeff Roberson tdq_add(tdq, td, SRQ_YIELDING); 166808c9a16cSJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 166908c9a16cSJeff Roberson } 167008c9a16cSJeff Roberson 167108c9a16cSJeff Roberson /* 1672731016feSWojciech A. Koszek * Block a thread for switching. Similar to thread_block() but does not 1673731016feSWojciech A. Koszek * bump the spin count. 1674731016feSWojciech A. Koszek */ 1675731016feSWojciech A. Koszek static inline struct mtx * 1676731016feSWojciech A. Koszek thread_block_switch(struct thread *td) 1677731016feSWojciech A. Koszek { 1678731016feSWojciech A. Koszek struct mtx *lock; 1679731016feSWojciech A. Koszek 1680731016feSWojciech A. Koszek THREAD_LOCK_ASSERT(td, MA_OWNED); 1681731016feSWojciech A. Koszek lock = td->td_lock; 1682731016feSWojciech A. Koszek td->td_lock = &blocked_lock; 1683731016feSWojciech A. Koszek mtx_unlock_spin(lock); 1684731016feSWojciech A. Koszek 1685731016feSWojciech A. Koszek return (lock); 1686731016feSWojciech A. Koszek } 1687731016feSWojciech A. Koszek 1688731016feSWojciech A. Koszek /* 1689c47f202bSJeff Roberson * Handle migration from sched_switch(). 
This happens only for 1690c47f202bSJeff Roberson * cpu binding. 1691c47f202bSJeff Roberson */ 1692c47f202bSJeff Roberson static struct mtx * 1693c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 1694c47f202bSJeff Roberson { 1695c47f202bSJeff Roberson struct tdq *tdn; 1696c47f202bSJeff Roberson 1697c47f202bSJeff Roberson tdn = TDQ_CPU(td->td_sched->ts_cpu); 1698c47f202bSJeff Roberson #ifdef SMP 1699c47f202bSJeff Roberson /* 1700c47f202bSJeff Roberson * Do the lock dance required to avoid LOR. We grab an extra 1701c47f202bSJeff Roberson * spinlock nesting to prevent preemption while we're 1702c47f202bSJeff Roberson * not holding either run-queue lock. 1703c47f202bSJeff Roberson */ 1704c47f202bSJeff Roberson spinlock_enter(); 1705c47f202bSJeff Roberson thread_block_switch(td); /* This releases the lock on tdq. */ 1706c47f202bSJeff Roberson TDQ_LOCK(tdn); 1707c47f202bSJeff Roberson tdq_add(tdn, td, flags); 1708ff256d9cSJeff Roberson tdq_notify(tdn, td->td_sched); 1709c47f202bSJeff Roberson /* 1710c47f202bSJeff Roberson * After we unlock tdn the new cpu still can't switch into this 1711c47f202bSJeff Roberson * thread until we've unblocked it in cpu_switch(). The lock 1712c47f202bSJeff Roberson * pointers may match in the case of HTT cores. Don't unlock here 1713c47f202bSJeff Roberson * or we can deadlock when the other CPU runs the IPI handler. 1714c47f202bSJeff Roberson */ 1715c47f202bSJeff Roberson if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) { 1716c47f202bSJeff Roberson TDQ_UNLOCK(tdn); 1717c47f202bSJeff Roberson TDQ_LOCK(tdq); 1718c47f202bSJeff Roberson } 1719c47f202bSJeff Roberson spinlock_exit(); 1720c47f202bSJeff Roberson #endif 1721c47f202bSJeff Roberson return (TDQ_LOCKPTR(tdn)); 1722c47f202bSJeff Roberson } 1723c47f202bSJeff Roberson 1724c47f202bSJeff Roberson /* 1725ae7a6b38SJeff Roberson * Release a thread that was blocked with thread_block_switch(). 1726ae7a6b38SJeff Roberson */ 1727ae7a6b38SJeff Roberson static inline void 1728ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx) 1729ae7a6b38SJeff Roberson { 1730ae7a6b38SJeff Roberson atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 1731ae7a6b38SJeff Roberson (uintptr_t)mtx); 1732ae7a6b38SJeff Roberson } 1733ae7a6b38SJeff Roberson 1734ae7a6b38SJeff Roberson /* 1735ae7a6b38SJeff Roberson * Switch threads. This function has to handle threads coming in while 1736ae7a6b38SJeff Roberson * blocked for some reason, running, or idle. It also must deal with 1737ae7a6b38SJeff Roberson * migrating a thread from one queue to another as running threads may 1738ae7a6b38SJeff Roberson * be assigned elsewhere via binding. 
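 *
 * The body below distinguishes three cases: an idle thread keeps its
 * lock and is simply marked runnable again, a still-running thread is
 * requeued (possibly on another cpu via sched_switch_migrate()), and
 * anything else is assumed to be going to sleep and is only removed
 * from the load accounting.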
1739ae7a6b38SJeff Roberson */ 17403db720fdSDavid Xu void 17413389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags) 174235e6168fSJeff Roberson { 1743c02bbb43SJeff Roberson struct tdq *tdq; 1744ad1e7d28SJulian Elischer struct td_sched *ts; 1745ae7a6b38SJeff Roberson struct mtx *mtx; 1746c47f202bSJeff Roberson int srqflag; 1747ae7a6b38SJeff Roberson int cpuid; 174835e6168fSJeff Roberson 17497b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 175035e6168fSJeff Roberson 1751ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1752ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1753e7d50326SJeff Roberson ts = td->td_sched; 1754c47f202bSJeff Roberson mtx = td->td_lock; 1755ae7a6b38SJeff Roberson ts->ts_rltick = ticks; 1756060563ecSJulian Elischer td->td_lastcpu = td->td_oncpu; 1757060563ecSJulian Elischer td->td_oncpu = NOCPU; 175852eb8464SJohn Baldwin td->td_flags &= ~TDF_NEEDRESCHED; 175977918643SStephan Uphoff td->td_owepreempt = 0; 1760b11fdad0SJeff Roberson /* 1761ae7a6b38SJeff Roberson * The lock pointer in an idle thread should never change. Reset it 1762ae7a6b38SJeff Roberson * to CAN_RUN as well. 1763b11fdad0SJeff Roberson */ 1764486a9414SJulian Elischer if (TD_IS_IDLETHREAD(td)) { 1765ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1766bf0acc27SJohn Baldwin TD_SET_CAN_RUN(td); 17677b20fb19SJeff Roberson } else if (TD_IS_RUNNING(td)) { 1768ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 17697b20fb19SJeff Roberson tdq_load_rem(tdq, ts); 1770c47f202bSJeff Roberson srqflag = (flags & SW_PREEMPT) ? 1771598b368dSJeff Roberson SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1772c47f202bSJeff Roberson SRQ_OURSELF|SRQ_YIELDING; 1773c47f202bSJeff Roberson if (ts->ts_cpu == cpuid) 1774c47f202bSJeff Roberson tdq_add(tdq, td, srqflag); 1775c47f202bSJeff Roberson else 1776c47f202bSJeff Roberson mtx = sched_switch_migrate(tdq, td, srqflag); 1777ae7a6b38SJeff Roberson } else { 1778ae7a6b38SJeff Roberson /* This thread must be going to sleep. */ 1779ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1780ae7a6b38SJeff Roberson mtx = thread_block_switch(td); 1781ae7a6b38SJeff Roberson tdq_load_rem(tdq, ts); 1782ae7a6b38SJeff Roberson } 1783ae7a6b38SJeff Roberson /* 1784ae7a6b38SJeff Roberson * We enter here with the thread blocked and assigned to the 1785ae7a6b38SJeff Roberson * appropriate cpu run-queue or sleep-queue and with the current 1786ae7a6b38SJeff Roberson * thread-queue locked. 1787ae7a6b38SJeff Roberson */ 1788ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 1789ae7a6b38SJeff Roberson /* 179008c9a16cSJeff Roberson * If KSE assigned a new thread just add it here and let choosethread 179108c9a16cSJeff Roberson * select the best one. 1792ae7a6b38SJeff Roberson */ 179308c9a16cSJeff Roberson if (newtd != NULL) 179408c9a16cSJeff Roberson sched_switchin(tdq, newtd); 17952454aaf5SJeff Roberson newtd = choosethread(); 1796ae7a6b38SJeff Roberson /* 1797ae7a6b38SJeff Roberson * Call the MD code to switch contexts if necessary. 
1798ae7a6b38SJeff Roberson */ 1799ebccf1e3SJoseph Koshy if (td != newtd) { 1800ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1801ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1802ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1803ebccf1e3SJoseph Koshy #endif 1804eea4f254SJeff Roberson lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); 180559c68134SJeff Roberson TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 1806ae7a6b38SJeff Roberson cpu_switch(td, newtd, mtx); 1807ae7a6b38SJeff Roberson /* 1808ae7a6b38SJeff Roberson * We may return from cpu_switch on a different cpu. However, 1809ae7a6b38SJeff Roberson * we always return with td_lock pointing to the current cpu's 1810ae7a6b38SJeff Roberson * run queue lock. 1811ae7a6b38SJeff Roberson */ 1812ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1813ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1814eea4f254SJeff Roberson lock_profile_obtain_lock_success( 1815eea4f254SJeff Roberson &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); 1816ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1817ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1818ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1819ebccf1e3SJoseph Koshy #endif 1820ae7a6b38SJeff Roberson } else 1821ae7a6b38SJeff Roberson thread_unblock_switch(td, mtx); 1822ae7a6b38SJeff Roberson /* 182362fa74d9SJeff Roberson * We should always get here with the lowest priority td possible. 182462fa74d9SJeff Roberson */ 182562fa74d9SJeff Roberson tdq->tdq_lowpri = td->td_priority; 182662fa74d9SJeff Roberson /* 1827ae7a6b38SJeff Roberson * Assert that all went well and return. 1828ae7a6b38SJeff Roberson */ 1829ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); 1830ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1831ae7a6b38SJeff Roberson td->td_oncpu = cpuid; 183235e6168fSJeff Roberson } 183335e6168fSJeff Roberson 1834ae7a6b38SJeff Roberson /* 1835ae7a6b38SJeff Roberson * Adjust thread priorities as a result of a nice request. 1836ae7a6b38SJeff Roberson */ 183735e6168fSJeff Roberson void 1838fa885116SJulian Elischer sched_nice(struct proc *p, int nice) 183935e6168fSJeff Roberson { 184035e6168fSJeff Roberson struct thread *td; 184135e6168fSJeff Roberson 1842fa885116SJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 18437b20fb19SJeff Roberson PROC_SLOCK_ASSERT(p, MA_OWNED); 1844e7d50326SJeff Roberson 1845fa885116SJulian Elischer p->p_nice = nice; 18468460a577SJohn Birrell FOREACH_THREAD_IN_PROC(p, td) { 18477b20fb19SJeff Roberson thread_lock(td); 18488460a577SJohn Birrell sched_priority(td); 1849e7d50326SJeff Roberson sched_prio(td, td->td_base_user_pri); 18507b20fb19SJeff Roberson thread_unlock(td); 185135e6168fSJeff Roberson } 1852fa885116SJulian Elischer } 185335e6168fSJeff Roberson 1854ae7a6b38SJeff Roberson /* 1855ae7a6b38SJeff Roberson * Record the sleep time for the interactivity scorer. 1856ae7a6b38SJeff Roberson */ 185735e6168fSJeff Roberson void 185844f3b092SJohn Baldwin sched_sleep(struct thread *td) 185935e6168fSJeff Roberson { 1860e7d50326SJeff Roberson 18617b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 186235e6168fSJeff Roberson 186354b0e65fSJeff Roberson td->td_slptick = ticks; 186435e6168fSJeff Roberson } 186535e6168fSJeff Roberson 1866ae7a6b38SJeff Roberson /* 1867ae7a6b38SJeff Roberson * Schedule a thread to resume execution and record how long it voluntarily 1868ae7a6b38SJeff Roberson * slept. We also update the pctcpu, interactivity, and priority. 
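 *
 * Scale check (editorial note): the slept time is credited below as
 * (ticks - slptick) << SCHED_TICK_SHIFT, the same fixed-point scale at
 * which sched_clock() credits ts_runtime, so sleep and run time compare
 * directly in sched_interact_score().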
1869ae7a6b38SJeff Roberson */
187035e6168fSJeff Roberson void
187135e6168fSJeff Roberson sched_wakeup(struct thread *td)
187235e6168fSJeff Roberson {
187314618990SJeff Roberson struct td_sched *ts;
1874ae7a6b38SJeff Roberson int slptick;
1875e7d50326SJeff Roberson 
18767b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED);
187714618990SJeff Roberson ts = td->td_sched;
187835e6168fSJeff Roberson /*
1879e7d50326SJeff Roberson * If we slept for more than a tick, update our interactivity and
1880e7d50326SJeff Roberson * priority.
188135e6168fSJeff Roberson */
188254b0e65fSJeff Roberson slptick = td->td_slptick;
188354b0e65fSJeff Roberson td->td_slptick = 0;
1884ae7a6b38SJeff Roberson if (slptick && slptick != ticks) {
18859a93305aSJeff Roberson u_int hzticks;
1886f1e8dc4aSJeff Roberson 
1887ae7a6b38SJeff Roberson hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
1888ae7a6b38SJeff Roberson ts->ts_slptime += hzticks;
18898460a577SJohn Birrell sched_interact_update(td);
189014618990SJeff Roberson sched_pctcpu_update(ts);
18918460a577SJohn Birrell sched_priority(td);
1892f1e8dc4aSJeff Roberson }
189314618990SJeff Roberson /* Reset the slice value after we sleep. */
189414618990SJeff Roberson ts->ts_slice = sched_slice;
18957a5e5e2aSJeff Roberson sched_add(td, SRQ_BORING);
189635e6168fSJeff Roberson }
189735e6168fSJeff Roberson 
189835e6168fSJeff Roberson /*
189935e6168fSJeff Roberson * Penalize the parent for creating a new child and initialize the child's
190035e6168fSJeff Roberson * priority.
190135e6168fSJeff Roberson */
190235e6168fSJeff Roberson void
19038460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child)
190415dc847eSJeff Roberson {
19057b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED);
1906ad1e7d28SJulian Elischer sched_fork_thread(td, child);
1907e7d50326SJeff Roberson /*
1908e7d50326SJeff Roberson * Penalize the parent and child for forking.
1909e7d50326SJeff Roberson */
1910e7d50326SJeff Roberson sched_interact_fork(child);
1911e7d50326SJeff Roberson sched_priority(child);
1912ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr;
1913e7d50326SJeff Roberson sched_interact_update(td);
1914e7d50326SJeff Roberson sched_priority(td);
1915ad1e7d28SJulian Elischer }
1916ad1e7d28SJulian Elischer 
1917ae7a6b38SJeff Roberson /*
1918ae7a6b38SJeff Roberson * Fork a new thread, may be within the same process.
1919ae7a6b38SJeff Roberson */
1920ad1e7d28SJulian Elischer void
1921ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child)
1922ad1e7d28SJulian Elischer {
1923ad1e7d28SJulian Elischer struct td_sched *ts;
1924ad1e7d28SJulian Elischer struct td_sched *ts2;
19258460a577SJohn Birrell 
1926e7d50326SJeff Roberson /*
1927e7d50326SJeff Roberson * Initialize child.
1928e7d50326SJeff Roberson */
19297b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED);
1930ed062c8dSJulian Elischer sched_newthread(child);
1931ae7a6b38SJeff Roberson child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
193262fa74d9SJeff Roberson child->td_cpuset = cpuset_ref(td->td_cpuset);
1933ad1e7d28SJulian Elischer ts = td->td_sched;
1934ad1e7d28SJulian Elischer ts2 = child->td_sched;
1935ad1e7d28SJulian Elischer ts2->ts_cpu = ts->ts_cpu;
1936ad1e7d28SJulian Elischer ts2->ts_runq = NULL;
1937e7d50326SJeff Roberson /*
1938e7d50326SJeff Roberson * Grab our parent's cpu estimation information and priority.
1939e7d50326SJeff Roberson */
1940ad1e7d28SJulian Elischer ts2->ts_ticks = ts->ts_ticks;
1941ad1e7d28SJulian Elischer ts2->ts_ltick = ts->ts_ltick;
1942ad1e7d28SJulian Elischer ts2->ts_ftick = ts->ts_ftick;
1943e7d50326SJeff Roberson child->td_user_pri = td->td_user_pri;
1944e7d50326SJeff Roberson child->td_base_user_pri = td->td_base_user_pri;
1945e7d50326SJeff Roberson /*
1946e7d50326SJeff Roberson * And update interactivity score.
1947e7d50326SJeff Roberson */
1948ae7a6b38SJeff Roberson ts2->ts_slptime = ts->ts_slptime;
1949ae7a6b38SJeff Roberson ts2->ts_runtime = ts->ts_runtime;
1950e7d50326SJeff Roberson ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */
195115dc847eSJeff Roberson }
195215dc847eSJeff Roberson 
1953ae7a6b38SJeff Roberson /*
1954ae7a6b38SJeff Roberson * Adjust the priority class of a thread.
1955ae7a6b38SJeff Roberson */
195615dc847eSJeff Roberson void
19578460a577SJohn Birrell sched_class(struct thread *td, int class)
195815dc847eSJeff Roberson {
195915dc847eSJeff Roberson 
19607b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED);
19618460a577SJohn Birrell if (td->td_pri_class == class)
196215dc847eSJeff Roberson return;
1963155b9987SJeff Roberson /*
1964155b9987SJeff Roberson * On SMP if we're on the RUNQ we must adjust the transferable
1965155b9987SJeff Roberson * count because we could be changing to or from an interrupt
1966155b9987SJeff Roberson * class.
1967155b9987SJeff Roberson */
19687a5e5e2aSJeff Roberson if (TD_ON_RUNQ(td)) {
19691e516cf5SJeff Roberson struct tdq *tdq;
19701e516cf5SJeff Roberson 
19711e516cf5SJeff Roberson tdq = TDQ_CPU(td->td_sched->ts_cpu);
197262fa74d9SJeff Roberson if (THREAD_CAN_MIGRATE(td))
1973d2ad694cSJeff Roberson tdq->tdq_transferable--;
19741e516cf5SJeff Roberson td->td_pri_class = class;
197562fa74d9SJeff Roberson if (THREAD_CAN_MIGRATE(td))
1976d2ad694cSJeff Roberson tdq->tdq_transferable++;
197780f86c9fSJeff Roberson }
19788460a577SJohn Birrell td->td_pri_class = class;
197935e6168fSJeff Roberson }
198035e6168fSJeff Roberson 
198135e6168fSJeff Roberson /*
198235e6168fSJeff Roberson * Return some of the child's priority and interactivity to the parent.
198335e6168fSJeff Roberson */
198435e6168fSJeff Roberson void
1985fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child)
198635e6168fSJeff Roberson {
1987e7d50326SJeff Roberson struct thread *td;
1988141ad61cSJeff Roberson 
19898460a577SJohn Birrell CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
1990431f8906SJulian Elischer child, child->td_name, child->td_priority);
19918460a577SJohn Birrell 
19927b20fb19SJeff Roberson PROC_SLOCK_ASSERT(p, MA_OWNED);
1993e7d50326SJeff Roberson td = FIRST_THREAD_IN_PROC(p);
1994e7d50326SJeff Roberson sched_exit_thread(td, child);
1995ad1e7d28SJulian Elischer }
1996ad1e7d28SJulian Elischer 
1997ae7a6b38SJeff Roberson /*
1998ae7a6b38SJeff Roberson * Penalize another thread for the time spent on this one. This helps to
1999ae7a6b38SJeff Roberson * worsen the priority and interactivity of processes which schedule batch
2000ae7a6b38SJeff Roberson * jobs such as make. This has little effect on the make process itself but
2001ae7a6b38SJeff Roberson * causes new processes spawned by it to receive worse scores immediately.
2002ae7a6b38SJeff Roberson */ 2003ad1e7d28SJulian Elischer void 2004fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child) 2005ad1e7d28SJulian Elischer { 2006fc6c30f6SJulian Elischer 2007e7d50326SJeff Roberson CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 2008431f8906SJulian Elischer child, child->td_name, child->td_priority); 2009e7d50326SJeff Roberson 2010e7d50326SJeff Roberson #ifdef KSE 2011e7d50326SJeff Roberson /* 2012e7d50326SJeff Roberson * KSE forks and exits so often that this penalty causes short-lived 2013e7d50326SJeff Roberson * threads to always be non-interactive. This causes mozilla to 2014e7d50326SJeff Roberson * crawl under load. 2015e7d50326SJeff Roberson */ 2016e7d50326SJeff Roberson if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc) 2017e7d50326SJeff Roberson return; 2018e7d50326SJeff Roberson #endif 2019e7d50326SJeff Roberson /* 2020e7d50326SJeff Roberson * Give the child's runtime to the parent without returning the 2021e7d50326SJeff Roberson * sleep time as a penalty to the parent. This causes shells that 2022e7d50326SJeff Roberson * launch expensive things to mark their children as expensive. 2023e7d50326SJeff Roberson */ 20247b20fb19SJeff Roberson thread_lock(td); 2025ae7a6b38SJeff Roberson td->td_sched->ts_runtime += child->td_sched->ts_runtime; 2026fc6c30f6SJulian Elischer sched_interact_update(td); 2027e7d50326SJeff Roberson sched_priority(td); 20287b20fb19SJeff Roberson thread_unlock(td); 2029ad1e7d28SJulian Elischer } 2030ad1e7d28SJulian Elischer 2031ff256d9cSJeff Roberson void 2032ff256d9cSJeff Roberson sched_preempt(struct thread *td) 2033ff256d9cSJeff Roberson { 2034ff256d9cSJeff Roberson struct tdq *tdq; 2035ff256d9cSJeff Roberson 2036ff256d9cSJeff Roberson thread_lock(td); 2037ff256d9cSJeff Roberson tdq = TDQ_SELF(); 2038ff256d9cSJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2039ff256d9cSJeff Roberson tdq->tdq_ipipending = 0; 2040ff256d9cSJeff Roberson if (td->td_priority > tdq->tdq_lowpri) { 2041ff256d9cSJeff Roberson if (td->td_critnest > 1) 2042ff256d9cSJeff Roberson td->td_owepreempt = 1; 2043ff256d9cSJeff Roberson else 2044ff256d9cSJeff Roberson mi_switch(SW_INVOL | SW_PREEMPT, NULL); 2045ff256d9cSJeff Roberson } 2046ff256d9cSJeff Roberson thread_unlock(td); 2047ff256d9cSJeff Roberson } 2048ff256d9cSJeff Roberson 2049ae7a6b38SJeff Roberson /* 2050ae7a6b38SJeff Roberson * Fix priorities on return to user-space. Priorities may be elevated due 2051ae7a6b38SJeff Roberson * to static priorities in msleep() or similar. 2052ae7a6b38SJeff Roberson */ 2053ad1e7d28SJulian Elischer void 2054ad1e7d28SJulian Elischer sched_userret(struct thread *td) 2055ad1e7d28SJulian Elischer { 2056ad1e7d28SJulian Elischer /* 2057ad1e7d28SJulian Elischer * XXX we cheat slightly on the locking here to avoid locking in 2058ad1e7d28SJulian Elischer * the usual case. Setting td_priority here is essentially an 2059ad1e7d28SJulian Elischer * incomplete workaround for not setting it properly elsewhere. 2060ad1e7d28SJulian Elischer * Now that some interrupt handlers are threads, not setting it 2061ad1e7d28SJulian Elischer * properly elsewhere can clobber it in the window between setting 2062ad1e7d28SJulian Elischer * it here and returning to user mode, so don't waste time setting 2063ad1e7d28SJulian Elischer * it perfectly here. 
2064ad1e7d28SJulian Elischer */ 2065ad1e7d28SJulian Elischer KASSERT((td->td_flags & TDF_BORROWING) == 0, 2066ad1e7d28SJulian Elischer ("thread with borrowed priority returning to userland")); 2067ad1e7d28SJulian Elischer if (td->td_priority != td->td_user_pri) { 20687b20fb19SJeff Roberson thread_lock(td); 2069ad1e7d28SJulian Elischer td->td_priority = td->td_user_pri; 2070ad1e7d28SJulian Elischer td->td_base_pri = td->td_user_pri; 207162fa74d9SJeff Roberson tdq_setlowpri(TDQ_SELF(), td); 20727b20fb19SJeff Roberson thread_unlock(td); 2073ad1e7d28SJulian Elischer } 207435e6168fSJeff Roberson } 207535e6168fSJeff Roberson 2076ae7a6b38SJeff Roberson /* 2077ae7a6b38SJeff Roberson * Handle a stathz tick. This is really only relevant for timeshare 2078ae7a6b38SJeff Roberson * threads. 2079ae7a6b38SJeff Roberson */ 208035e6168fSJeff Roberson void 20817cf90fb3SJeff Roberson sched_clock(struct thread *td) 208235e6168fSJeff Roberson { 2083ad1e7d28SJulian Elischer struct tdq *tdq; 2084ad1e7d28SJulian Elischer struct td_sched *ts; 208535e6168fSJeff Roberson 2086ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 20873f872f85SJeff Roberson tdq = TDQ_SELF(); 20887fcf154aSJeff Roberson #ifdef SMP 20897fcf154aSJeff Roberson /* 20907fcf154aSJeff Roberson * We run the long term load balancer infrequently on the first cpu. 20917fcf154aSJeff Roberson */ 20927fcf154aSJeff Roberson if (balance_tdq == tdq) { 20937fcf154aSJeff Roberson if (balance_ticks && --balance_ticks == 0) 20947fcf154aSJeff Roberson sched_balance(); 20957fcf154aSJeff Roberson } 20967fcf154aSJeff Roberson #endif 20973f872f85SJeff Roberson /* 20983f872f85SJeff Roberson * Advance the insert index once for each tick to ensure that all 20993f872f85SJeff Roberson * threads get a chance to run. 21003f872f85SJeff Roberson */ 21013f872f85SJeff Roberson if (tdq->tdq_idx == tdq->tdq_ridx) { 21023f872f85SJeff Roberson tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; 21033f872f85SJeff Roberson if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) 21043f872f85SJeff Roberson tdq->tdq_ridx = tdq->tdq_idx; 21053f872f85SJeff Roberson } 21063f872f85SJeff Roberson ts = td->td_sched; 2107fd0b8c78SJeff Roberson if (td->td_pri_class & PRI_FIFO_BIT) 2108a8949de2SJeff Roberson return; 2109fd0b8c78SJeff Roberson if (td->td_pri_class == PRI_TIMESHARE) { 2110a8949de2SJeff Roberson /* 2111fd0b8c78SJeff Roberson * We used a tick; charge it to the thread so 2112fd0b8c78SJeff Roberson * that we can compute our interactivity. 211315dc847eSJeff Roberson */ 2114ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr; 21158460a577SJohn Birrell sched_interact_update(td); 2116fd0b8c78SJeff Roberson } 211735e6168fSJeff Roberson /* 211835e6168fSJeff Roberson * We used up one time slice. 211935e6168fSJeff Roberson */ 2120ad1e7d28SJulian Elischer if (--ts->ts_slice > 0) 212115dc847eSJeff Roberson return; 212235e6168fSJeff Roberson /* 212315dc847eSJeff Roberson * We're out of time, recompute priorities and requeue. 212435e6168fSJeff Roberson */ 21258460a577SJohn Birrell sched_priority(td); 21264a338afdSJulian Elischer td->td_flags |= TDF_NEEDRESCHED; 212735e6168fSJeff Roberson } 212835e6168fSJeff Roberson 2129ae7a6b38SJeff Roberson /* 2130ae7a6b38SJeff Roberson * Called once per hz tick. Used for cpu utilization information. This 2131ae7a6b38SJeff Roberson * is easier than trying to scale based on stathz. 
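 *
 * Each hz tick adds 1 << SCHED_TICK_SHIFT to ts_ticks below, so a
 * cpu-bound thread accumulates roughly hz << SCHED_TICK_SHIFT units per
 * second; sched_pctcpu() later converts the count back into a
 * fixed-point percentage.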
2132ae7a6b38SJeff Roberson */
2133ae7a6b38SJeff Roberson void
2134ae7a6b38SJeff Roberson sched_tick(void)
2135ae7a6b38SJeff Roberson {
2136ae7a6b38SJeff Roberson struct td_sched *ts;
2137ae7a6b38SJeff Roberson 
2138ae7a6b38SJeff Roberson ts = curthread->td_sched;
2139ae7a6b38SJeff Roberson /* Adjust ticks for pctcpu */
2140ae7a6b38SJeff Roberson ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2141ae7a6b38SJeff Roberson ts->ts_ltick = ticks;
2142ae7a6b38SJeff Roberson /*
2143ae7a6b38SJeff Roberson * Update if we've exceeded our desired tick threshold by over one
2144ae7a6b38SJeff Roberson * second.
2145ae7a6b38SJeff Roberson */
2146ae7a6b38SJeff Roberson if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2147ae7a6b38SJeff Roberson sched_pctcpu_update(ts);
2148ae7a6b38SJeff Roberson }
2149ae7a6b38SJeff Roberson 
2150ae7a6b38SJeff Roberson /*
2151ae7a6b38SJeff Roberson * Return whether the current CPU has runnable tasks. Used for in-kernel
2152ae7a6b38SJeff Roberson * cooperative idle threads.
2153ae7a6b38SJeff Roberson */
215435e6168fSJeff Roberson int
215535e6168fSJeff Roberson sched_runnable(void)
215635e6168fSJeff Roberson {
2157ad1e7d28SJulian Elischer struct tdq *tdq;
2158b90816f1SJeff Roberson int load;
215935e6168fSJeff Roberson 
2160b90816f1SJeff Roberson load = 1;
2161b90816f1SJeff Roberson 
2162ad1e7d28SJulian Elischer tdq = TDQ_SELF();
21633f741ca1SJeff Roberson if ((curthread->td_flags & TDF_IDLETD) != 0) {
2164d2ad694cSJeff Roberson if (tdq->tdq_load > 0)
21653f741ca1SJeff Roberson goto out;
21663f741ca1SJeff Roberson } else
2167d2ad694cSJeff Roberson if (tdq->tdq_load - 1 > 0)
2168b90816f1SJeff Roberson goto out;
2169b90816f1SJeff Roberson load = 0;
2170b90816f1SJeff Roberson out:
2171b90816f1SJeff Roberson return (load);
217235e6168fSJeff Roberson }
217335e6168fSJeff Roberson 
2174ae7a6b38SJeff Roberson /*
2175ae7a6b38SJeff Roberson * Choose the highest priority thread to run. The thread is removed from
2176ae7a6b38SJeff Roberson * the run-queue while running; however, the load remains. For SMP we set
2177ae7a6b38SJeff Roberson * the tdq in the global idle bitmask if it idles here.
2178ae7a6b38SJeff Roberson */
21797a5e5e2aSJeff Roberson struct thread *
2180c9f25d8fSJeff Roberson sched_choose(void)
2181c9f25d8fSJeff Roberson {
2182ae7a6b38SJeff Roberson struct td_sched *ts;
2183ae7a6b38SJeff Roberson struct tdq *tdq;
2184ae7a6b38SJeff Roberson 
2185ae7a6b38SJeff Roberson tdq = TDQ_SELF();
2186ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2187ad1e7d28SJulian Elischer ts = tdq_choose(tdq);
2188ad1e7d28SJulian Elischer if (ts) {
2189ad1e7d28SJulian Elischer tdq_runq_rem(tdq, ts);
21907a5e5e2aSJeff Roberson return (ts->ts_thread);
219135e6168fSJeff Roberson }
219262fa74d9SJeff Roberson return (PCPU_GET(idlethread));
21937a5e5e2aSJeff Roberson }
21947a5e5e2aSJeff Roberson 
2195ae7a6b38SJeff Roberson /*
2196ae7a6b38SJeff Roberson * Set owepreempt if necessary. Preemption never happens directly in ULE,
2197ae7a6b38SJeff Roberson * we always request it once we exit a critical section.
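 *
 * That is, td_owepreempt is a deferred request: the switch itself is
 * expected to happen when the interrupted thread leaves its critical
 * section and critical_exit() notices the flag, not synchronously here.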
2198ae7a6b38SJeff Roberson */
2199ae7a6b38SJeff Roberson static inline void
2200ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td)
22017a5e5e2aSJeff Roberson {
22027a5e5e2aSJeff Roberson struct thread *ctd;
22037a5e5e2aSJeff Roberson int cpri;
22047a5e5e2aSJeff Roberson int pri;
22057a5e5e2aSJeff Roberson 
2206ff256d9cSJeff Roberson THREAD_LOCK_ASSERT(curthread, MA_OWNED);
2207ff256d9cSJeff Roberson 
22087a5e5e2aSJeff Roberson ctd = curthread;
22097a5e5e2aSJeff Roberson pri = td->td_priority;
22107a5e5e2aSJeff Roberson cpri = ctd->td_priority;
2211ff256d9cSJeff Roberson if (pri < cpri)
2212ff256d9cSJeff Roberson ctd->td_flags |= TDF_NEEDRESCHED;
22137a5e5e2aSJeff Roberson if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2214ae7a6b38SJeff Roberson return;
2215ff256d9cSJeff Roberson if (!sched_shouldpreempt(pri, cpri, 0))
2216ae7a6b38SJeff Roberson return;
22177a5e5e2aSJeff Roberson ctd->td_owepreempt = 1;
221835e6168fSJeff Roberson }
221935e6168fSJeff Roberson 
2220ae7a6b38SJeff Roberson /*
2221ae7a6b38SJeff Roberson * Add a thread to a thread queue. Initializes priority, slice, runq, and
2222ae7a6b38SJeff Roberson * adds it to the appropriate queue. This is the internal function called
2223ae7a6b38SJeff Roberson * when the tdq is predetermined.
2224ae7a6b38SJeff Roberson */
222535e6168fSJeff Roberson void
2226ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags)
222735e6168fSJeff Roberson {
2228ad1e7d28SJulian Elischer struct td_sched *ts;
222922bf7d9aSJeff Roberson int class;
2230c9f25d8fSJeff Roberson 
2231ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED);
22327a5e5e2aSJeff Roberson KASSERT((td->td_inhibitors == 0),
22337a5e5e2aSJeff Roberson ("sched_add: trying to run inhibited thread"));
22347a5e5e2aSJeff Roberson KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
22357a5e5e2aSJeff Roberson ("sched_add: bad thread state"));
2236b61ce5b0SJeff Roberson KASSERT(td->td_flags & TDF_INMEM,
2237b61ce5b0SJeff Roberson ("sched_add: thread swapped out"));
2238ae7a6b38SJeff Roberson 
2239ae7a6b38SJeff Roberson ts = td->td_sched;
22407a5e5e2aSJeff Roberson class = PRI_BASE(td->td_pri_class);
2241ae7a6b38SJeff Roberson TD_SET_RUNQ(td);
22427a5e5e2aSJeff Roberson if (ts->ts_slice == 0)
22437a5e5e2aSJeff Roberson ts->ts_slice = sched_slice;
22442454aaf5SJeff Roberson /*
2245ae7a6b38SJeff Roberson * Pick the run queue based on priority.
22462454aaf5SJeff Roberson */
2247ae7a6b38SJeff Roberson if (td->td_priority <= PRI_MAX_REALTIME)
2248ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_realtime;
2249ae7a6b38SJeff Roberson else if (td->td_priority <= PRI_MAX_TIMESHARE)
2250ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_timeshare;
22517b8bfa0dSJeff Roberson else
2252ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_idle;
2253ae7a6b38SJeff Roberson if (td->td_priority < tdq->tdq_lowpri)
2254ae7a6b38SJeff Roberson tdq->tdq_lowpri = td->td_priority;
2255ad1e7d28SJulian Elischer tdq_runq_add(tdq, ts, flags);
2256ad1e7d28SJulian Elischer tdq_load_add(tdq, ts);
2257ae7a6b38SJeff Roberson }
2258ae7a6b38SJeff Roberson 
2259ae7a6b38SJeff Roberson /*
2260ae7a6b38SJeff Roberson * Select the target thread queue and add a thread to it. Request
2261ae7a6b38SJeff Roberson * preemption or IPI a remote processor if required.
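 *
 * On SMP the flow below is: sched_pickcpu() chooses a cpu,
 * sched_setcpu() re-locks the thread onto that cpu's queue, tdq_add()
 * enqueues it, and tdq_notify() IPIs the remote cpu only when the
 * chosen cpu is not the current one.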
/*
 * Select the target thread queue and add a thread to it.  Request
 * preemption or IPI a remote processor if required.
 */
void
sched_add(struct thread *td, int flags)
{
	struct td_sched *ts;
	struct tdq *tdq;
#ifdef SMP
	int cpuid;
	int cpu;
#endif
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_name, td->td_priority, curthread,
	    curthread->td_name);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	/*
	 * Recalculate the priority before we select the target cpu or
	 * run-queue.
	 */
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_priority(td);
#ifdef SMP
	cpuid = PCPU_GET(cpuid);
	/*
	 * Pick the destination cpu and, if it isn't ours, transfer the
	 * thread to the target cpu.
	 */
	cpu = sched_pickcpu(ts, flags);
	tdq = sched_setcpu(ts, cpu, flags);
	tdq_add(tdq, td, flags);
	if (cpu != cpuid) {
		tdq_notify(tdq, ts);
		return;
	}
#else
	tdq = TDQ_SELF();
	TDQ_LOCK(tdq);
	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	thread_lock_set(td, TDQ_LOCKPTR(tdq));
	tdq_add(tdq, td, flags);
#endif
	if (!(flags & SRQ_YIELDING))
		sched_setpreempt(td);
}
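/*
 * Usage sketch (assumed typical wakeup path): the caller holds the
 * thread lock; SRQ_YIELDING suppresses the preemption check when the
 * caller intends to switch shortly anyway:
 *
 *	thread_lock(td);
 *	sched_add(td, SRQ_BORING);
 *	thread_unlock(td);
 */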
/*
 * Remove a thread from a run-queue without running it.  This is used
 * when we're stealing a thread from a remote queue.  Otherwise all threads
 * exit by calling sched_exit_thread() and sched_throw() themselves.
 */
void
sched_rem(struct thread *td)
{
	struct tdq *tdq;
	struct td_sched *ts;

	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_name, td->td_priority, curthread,
	    curthread->td_name);
	ts = td->td_sched;
	tdq = TDQ_CPU(ts->ts_cpu);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));
	tdq_runq_rem(tdq, ts);
	tdq_load_rem(tdq, ts);
	TD_SET_CAN_RUN(td);
	if (td->td_priority == tdq->tdq_lowpri)
		tdq_setlowpri(tdq, NULL);
}

/*
 * Fetch cpu utilization information.  Updates on demand.
 */
fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct td_sched *ts;

	pctcpu = 0;
	ts = td->td_sched;
	if (ts == NULL)
		return (0);

	thread_lock(td);
	if (ts->ts_ticks) {
		int rtick;

		sched_pctcpu_update(ts);
		/* How many rticks per second? */
		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
		pctcpu = (FSCALE * ((FSCALE * rtick) / hz)) >> FSHIFT;
	}
	thread_unlock(td);

	return (pctcpu);
}
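/*
 * Worked example for the fixed-point math above, with FSCALE == 1 << FSHIFT:
 * a thread that ran on every tick of the averaging window has rtick == hz,
 * so
 *
 *	pctcpu = (FSCALE * ((FSCALE * hz) / hz)) >> FSHIFT == FSCALE,
 *
 * i.e. 100%; rtick == hz / 2 yields FSCALE / 2, i.e. 50%.
 */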
/*
 * Enforce affinity settings for a thread.  Called after adjustments to
 * cpumask.
 */
void
sched_affinity(struct thread *td)
{
#ifdef SMP
	struct td_sched *ts;
	int cpu;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
		return;
	if (!TD_IS_RUNNING(td))
		return;
	td->td_flags |= TDF_NEEDRESCHED;
	if (!THREAD_CAN_MIGRATE(td))
		return;
	/*
	 * Assign the new cpu and force a switch before returning to
	 * userspace.  If the target thread is not running locally, send
	 * an ipi to force the issue.
	 */
	cpu = ts->ts_cpu;
	ts->ts_cpu = sched_pickcpu(ts, 0);
	if (cpu != PCPU_GET(cpuid))
		ipi_selected(1 << cpu, IPI_PREEMPT);
#endif
}

/*
 * Bind a thread to a target cpu.
 */
void
sched_bind(struct thread *td, int cpu)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
	ts = td->td_sched;
	if (ts->ts_flags & TSF_BOUND)
		sched_unbind(td);
	ts->ts_flags |= TSF_BOUND;
	sched_pin();
	if (PCPU_GET(cpuid) == cpu)
		return;
	ts->ts_cpu = cpu;
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
}

/*
 * Release a bound thread.
 */
void
sched_unbind(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	if ((ts->ts_flags & TSF_BOUND) == 0)
		return;
	ts->ts_flags &= ~TSF_BOUND;
	sched_unpin();
}

int
sched_is_bound(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	return (td->td_sched->ts_flags & TSF_BOUND);
}
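/*
 * Usage sketch for the binding API: code that must execute on a given
 * CPU (e.g. for per-cpu hardware access) brackets the work with bind
 * and unbind, taking the thread lock around each call:
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);
 *	thread_unlock(curthread);
 *	... perform per-cpu work ...
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */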
/*
 * Basic yield call.
 */
void
sched_relinquish(struct thread *td)
{
	thread_lock(td);
	SCHED_STAT_INC(switch_relinquish);
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
}

/*
 * Return the total system load.
 */
int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= mp_maxid; i++)
		total += TDQ_CPU(i)->tdq_sysload;
	return (total);
#else
	return (TDQ_SELF()->tdq_sysload);
#endif
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}

/*
 * The actual idle process.
 */
void
sched_idletd(void *dummy)
{
	struct thread *td;
	struct tdq *tdq;

	td = curthread;
	tdq = TDQ_SELF();
	mtx_assert(&Giant, MA_NOTOWNED);
	/* ULE relies on preemption for idle interruption. */
	for (;;) {
#ifdef SMP
		if (tdq_idled(tdq))
			cpu_idle();
#else
		cpu_idle();
#endif
	}
}

/*
 * A CPU is entering for the first time or a thread is exiting.
 */
void
sched_throw(struct thread *td)
{
	struct thread *newtd;
	struct tdq *tdq;

	tdq = TDQ_SELF();
	if (td == NULL) {
		/* Correct spinlock nesting and acquire the correct lock. */
		TDQ_LOCK(tdq);
		spinlock_exit();
	} else {
		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
		tdq_load_rem(tdq, td->td_sched);
		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
	}
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
	newtd = choosethread();
	/* Hand ownership of the run-queue lock directly to the new thread. */
	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
	PCPU_SET(switchtime, cpu_ticks());
	PCPU_SET(switchticks, ticks);
	cpu_throw(td, newtd);	/* doesn't return */
}
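/*
 * Sketch of the two entry paths (assumption drawn from the comment
 * above): a freshly started CPU enters the scheduler for the first
 * time via sched_throw(NULL), which acquires the run-queue lock and
 * unwinds the startup spinlock nesting, while an exiting thread calls
 * sched_throw(td) so its load is removed first.  Neither call returns.
 */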
/*
 * This is called from fork_exit().  Just acquire the correct locks and
 * let fork do the rest of the work.
 */
void
sched_fork_exit(struct thread *td)
{
	struct td_sched *ts;
	struct tdq *tdq;
	int cpuid;

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with the scheduler lock held.
	 */
	cpuid = PCPU_GET(cpuid);
	tdq = TDQ_CPU(cpuid);
	ts = td->td_sched;
	if (TD_IS_IDLETHREAD(td))
		td->td_lock = TDQ_LOCKPTR(tdq);
	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
	td->td_oncpu = cpuid;
	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
	lock_profile_obtain_lock_success(
	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
	tdq->tdq_lowpri = td->td_priority;
}

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
    "Scheduler");
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
    "Scheduler name");
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
    "Slice size for timeshare threads");
SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
    "Interactivity score threshold");
SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
    0, "Min priority for preemption; lower priorities have greater precedence");
#ifdef SMP
SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
    "Number of hz ticks to keep thread affinity for");
SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
    "Enables the long-term load balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
    &balance_interval, 0,
    "Average frequency in stathz ticks to run the long-term balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
    "Steals work from another hyper-threaded core on idle");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
    "Attempts to steal work from other cores before idling");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
    "Minimum load on remote cpu before we'll steal");
#endif
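/*
 * Example (sketch): the knobs above live under the kern.sched sysctl
 * tree and can be inspected or tuned from userland (the value below is
 * arbitrary, for illustration only):
 *
 *	$ sysctl kern.sched.name
 *	kern.sched.name: ULE
 *	# sysctl kern.sched.slice=12
 */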
/* ps compat.  All cpu percentages from ULE are weighted. */
static int ccpu = 0;
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"