135e6168fSJeff Roberson /*- 2e7d50326SJeff Roberson * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org> 335e6168fSJeff Roberson * All rights reserved. 435e6168fSJeff Roberson * 535e6168fSJeff Roberson * Redistribution and use in source and binary forms, with or without 635e6168fSJeff Roberson * modification, are permitted provided that the following conditions 735e6168fSJeff Roberson * are met: 835e6168fSJeff Roberson * 1. Redistributions of source code must retain the above copyright 935e6168fSJeff Roberson * notice unmodified, this list of conditions, and the following 1035e6168fSJeff Roberson * disclaimer. 1135e6168fSJeff Roberson * 2. Redistributions in binary form must reproduce the above copyright 1235e6168fSJeff Roberson * notice, this list of conditions and the following disclaimer in the 1335e6168fSJeff Roberson * documentation and/or other materials provided with the distribution. 1435e6168fSJeff Roberson * 1535e6168fSJeff Roberson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 1635e6168fSJeff Roberson * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 1735e6168fSJeff Roberson * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 1835e6168fSJeff Roberson * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 1935e6168fSJeff Roberson * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 2035e6168fSJeff Roberson * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 2135e6168fSJeff Roberson * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2235e6168fSJeff Roberson * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2335e6168fSJeff Roberson * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 2435e6168fSJeff Roberson * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 2535e6168fSJeff Roberson */ 2635e6168fSJeff Roberson 27ae7a6b38SJeff Roberson /* 28ae7a6b38SJeff Roberson * This file implements the ULE scheduler. ULE supports independent CPU 29ae7a6b38SJeff Roberson * run queues and fine grain locking. It has superior interactive 30ae7a6b38SJeff Roberson * performance under load even on uni-processor systems. 31ae7a6b38SJeff Roberson * 32ae7a6b38SJeff Roberson * etymology: 33a5423ea3SJeff Roberson * ULE is the last three letters in schedule. It owes its name to a 34ae7a6b38SJeff Roberson * generic user created for a scheduling system by Paul Mikesell at 35ae7a6b38SJeff Roberson * Isilon Systems and a general lack of creativity on the part of the author. 36ae7a6b38SJeff Roberson */ 37ae7a6b38SJeff Roberson 38677b542eSDavid E. O'Brien #include <sys/cdefs.h> 39677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 40677b542eSDavid E. O'Brien 414da0d332SPeter Wemm #include "opt_hwpmc_hooks.h" 424da0d332SPeter Wemm #include "opt_sched.h" 439923b511SScott Long 4435e6168fSJeff Roberson #include <sys/param.h> 4535e6168fSJeff Roberson #include <sys/systm.h> 462c3490b1SMarcel Moolenaar #include <sys/kdb.h> 4735e6168fSJeff Roberson #include <sys/kernel.h> 4835e6168fSJeff Roberson #include <sys/ktr.h> 4935e6168fSJeff Roberson #include <sys/lock.h> 5035e6168fSJeff Roberson #include <sys/mutex.h> 5135e6168fSJeff Roberson #include <sys/proc.h> 52245f3abfSJeff Roberson #include <sys/resource.h> 539bacd788SJeff Roberson #include <sys/resourcevar.h> 5435e6168fSJeff Roberson #include <sys/sched.h> 5535e6168fSJeff Roberson #include <sys/smp.h> 5635e6168fSJeff Roberson #include <sys/sx.h> 5735e6168fSJeff Roberson #include <sys/sysctl.h> 5835e6168fSJeff Roberson #include <sys/sysproto.h> 59f5c157d9SJohn Baldwin #include <sys/turnstile.h> 603db720fdSDavid Xu #include <sys/umtx.h> 6135e6168fSJeff Roberson #include <sys/vmmeter.h> 6262fa74d9SJeff Roberson #include <sys/cpuset.h> 6335e6168fSJeff Roberson #ifdef KTRACE 6435e6168fSJeff Roberson #include <sys/uio.h> 6535e6168fSJeff Roberson #include <sys/ktrace.h> 6635e6168fSJeff Roberson #endif 6735e6168fSJeff Roberson 68ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 69ebccf1e3SJoseph Koshy #include <sys/pmckern.h> 70ebccf1e3SJoseph Koshy #endif 71ebccf1e3SJoseph Koshy 7235e6168fSJeff Roberson #include <machine/cpu.h> 7322bf7d9aSJeff Roberson #include <machine/smp.h> 7435e6168fSJeff Roberson 75cbdd62adSPeter Grehan #if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__) 7602e2d6b4SJeff Roberson #error "This architecture is not currently compatible with ULE" 777a5e5e2aSJeff Roberson #endif 787a5e5e2aSJeff Roberson 79ae7a6b38SJeff Roberson #define KTR_ULE 0 8014618990SJeff Roberson 816b2f763fSJeff Roberson /* 82ae7a6b38SJeff Roberson * Thread scheduler specific section. All fields are protected 83ae7a6b38SJeff Roberson * by the thread lock. 84ed062c8dSJulian Elischer */ 85ad1e7d28SJulian Elischer struct td_sched { 86ae7a6b38SJeff Roberson TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */ 87ae7a6b38SJeff Roberson struct thread *ts_thread; /* Active associated thread. */ 88ae7a6b38SJeff Roberson struct runq *ts_runq; /* Run-queue we're queued on. */ 89ae7a6b38SJeff Roberson short ts_flags; /* TSF_* flags. */ 90ae7a6b38SJeff Roberson u_char ts_rqindex; /* Run queue index. */ 91ad1e7d28SJulian Elischer u_char ts_cpu; /* CPU that we have affinity for. */ 92ae7a6b38SJeff Roberson int ts_slice; /* Ticks of slice remaining. */ 93ae7a6b38SJeff Roberson u_int ts_slptime; /* Number of ticks we vol. slept */ 94ae7a6b38SJeff Roberson u_int ts_runtime; /* Number of ticks we were running */ 95ed062c8dSJulian Elischer /* The following variables are only used for pctcpu calculation */ 96ad1e7d28SJulian Elischer int ts_ltick; /* Last tick that we were running on */ 97ad1e7d28SJulian Elischer int ts_ftick; /* First tick that we were running on */ 98ad1e7d28SJulian Elischer int ts_ticks; /* Tick count */ 997b8bfa0dSJeff Roberson int ts_rltick; /* Real last tick, for affinity. */ 100ed062c8dSJulian Elischer }; 101ad1e7d28SJulian Elischer /* flags kept in ts_flags */ 1027b8bfa0dSJeff Roberson #define TSF_BOUND 0x0001 /* Thread can not migrate. */ 1037b8bfa0dSJeff Roberson #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */ 10435e6168fSJeff Roberson 105ad1e7d28SJulian Elischer static struct td_sched td_sched0; 10635e6168fSJeff Roberson 10762fa74d9SJeff Roberson #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0) 10862fa74d9SJeff Roberson #define THREAD_CAN_SCHED(td, cpu) \ 10962fa74d9SJeff Roberson CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask) 11062fa74d9SJeff Roberson 11135e6168fSJeff Roberson /* 112e7d50326SJeff Roberson * Cpu percentage computation macros and defines. 113e1f89c22SJeff Roberson * 114e7d50326SJeff Roberson * SCHED_TICK_SECS: Number of seconds to average the cpu usage across. 115e7d50326SJeff Roberson * SCHED_TICK_TARG: Number of hz ticks to average the cpu usage across. 1168ab80cf0SJeff Roberson * SCHED_TICK_MAX: Maximum number of ticks before scaling back. 117e7d50326SJeff Roberson * SCHED_TICK_SHIFT: Shift factor to avoid rounding away results. 118e7d50326SJeff Roberson * SCHED_TICK_HZ: Compute the number of hz ticks for a given ticks count. 119e7d50326SJeff Roberson * SCHED_TICK_TOTAL: Gives the amount of time we've been recording ticks. 12035e6168fSJeff Roberson */ 121e7d50326SJeff Roberson #define SCHED_TICK_SECS 10 122e7d50326SJeff Roberson #define SCHED_TICK_TARG (hz * SCHED_TICK_SECS) 1238ab80cf0SJeff Roberson #define SCHED_TICK_MAX (SCHED_TICK_TARG + hz) 124e7d50326SJeff Roberson #define SCHED_TICK_SHIFT 10 125e7d50326SJeff Roberson #define SCHED_TICK_HZ(ts) ((ts)->ts_ticks >> SCHED_TICK_SHIFT) 126eddb4efaSJeff Roberson #define SCHED_TICK_TOTAL(ts) (max((ts)->ts_ltick - (ts)->ts_ftick, hz)) 12735e6168fSJeff Roberson 12835e6168fSJeff Roberson /* 129e7d50326SJeff Roberson * These macros determine priorities for non-interactive threads. They are 130e7d50326SJeff Roberson * assigned a priority based on their recent cpu utilization as expressed 131e7d50326SJeff Roberson * by the ratio of ticks to the tick total. NHALF priorities at the start 132e7d50326SJeff Roberson * and end of the MIN to MAX timeshare range are only reachable with negative 133e7d50326SJeff Roberson * or positive nice respectively. 134e7d50326SJeff Roberson * 135e7d50326SJeff Roberson * PRI_RANGE: Priority range for utilization dependent priorities. 136e7d50326SJeff Roberson * PRI_NRESV: Number of nice values. 137e7d50326SJeff Roberson * PRI_TICKS: Compute a priority in PRI_RANGE from the ticks count and total. 138e7d50326SJeff Roberson * PRI_NICE: Determines the part of the priority inherited from nice. 139e7d50326SJeff Roberson */ 140e7d50326SJeff Roberson #define SCHED_PRI_NRESV (PRIO_MAX - PRIO_MIN) 141e7d50326SJeff Roberson #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2) 142e7d50326SJeff Roberson #define SCHED_PRI_MIN (PRI_MIN_TIMESHARE + SCHED_PRI_NHALF) 143e7d50326SJeff Roberson #define SCHED_PRI_MAX (PRI_MAX_TIMESHARE - SCHED_PRI_NHALF) 144dda713dfSJeff Roberson #define SCHED_PRI_RANGE (SCHED_PRI_MAX - SCHED_PRI_MIN) 145e7d50326SJeff Roberson #define SCHED_PRI_TICKS(ts) \ 146e7d50326SJeff Roberson (SCHED_TICK_HZ((ts)) / \ 1471e516cf5SJeff Roberson (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE)) 148e7d50326SJeff Roberson #define SCHED_PRI_NICE(nice) (nice) 149e7d50326SJeff Roberson 150e7d50326SJeff Roberson /* 151e7d50326SJeff Roberson * These determine the interactivity of a process. Interactivity differs from 152e7d50326SJeff Roberson * cpu utilization in that it expresses the voluntary time slept vs time ran 153e7d50326SJeff Roberson * while cpu utilization includes all time not running. This more accurately 154e7d50326SJeff Roberson * models the intent of the thread. 15535e6168fSJeff Roberson * 156407b0157SJeff Roberson * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate 157407b0157SJeff Roberson * before throttling back. 158d322132cSJeff Roberson * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time. 159210491d3SJeff Roberson * INTERACT_MAX: Maximum interactivity value. Smaller is better. 160e1f89c22SJeff Roberson * INTERACT_THRESH: Threshhold for placement on the current runq. 16135e6168fSJeff Roberson */ 162e7d50326SJeff Roberson #define SCHED_SLP_RUN_MAX ((hz * 5) << SCHED_TICK_SHIFT) 163e7d50326SJeff Roberson #define SCHED_SLP_RUN_FORK ((hz / 2) << SCHED_TICK_SHIFT) 164210491d3SJeff Roberson #define SCHED_INTERACT_MAX (100) 165210491d3SJeff Roberson #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2) 1664c9612c6SJeff Roberson #define SCHED_INTERACT_THRESH (30) 167e1f89c22SJeff Roberson 16835e6168fSJeff Roberson /* 169e7d50326SJeff Roberson * tickincr: Converts a stathz tick into a hz domain scaled by 170e7d50326SJeff Roberson * the shift factor. Without the shift the error rate 171e7d50326SJeff Roberson * due to rounding would be unacceptably high. 172e7d50326SJeff Roberson * realstathz: stathz is sometimes 0 and run off of hz. 173e7d50326SJeff Roberson * sched_slice: Runtime of each thread before rescheduling. 174ae7a6b38SJeff Roberson * preempt_thresh: Priority threshold for preemption and remote IPIs. 17535e6168fSJeff Roberson */ 176e7d50326SJeff Roberson static int sched_interact = SCHED_INTERACT_THRESH; 177e7d50326SJeff Roberson static int realstathz; 178e7d50326SJeff Roberson static int tickincr; 179e7d50326SJeff Roberson static int sched_slice; 18002e2d6b4SJeff Roberson #ifdef PREEMPTION 18102e2d6b4SJeff Roberson #ifdef FULL_PREEMPTION 18202e2d6b4SJeff Roberson static int preempt_thresh = PRI_MAX_IDLE; 18302e2d6b4SJeff Roberson #else 184ae7a6b38SJeff Roberson static int preempt_thresh = PRI_MIN_KERN; 18502e2d6b4SJeff Roberson #endif 18602e2d6b4SJeff Roberson #else 18702e2d6b4SJeff Roberson static int preempt_thresh = 0; 18802e2d6b4SJeff Roberson #endif 18962fa74d9SJeff Roberson static int lowpri_userret = 1; 190ae7a6b38SJeff Roberson 19135e6168fSJeff Roberson /* 192ae7a6b38SJeff Roberson * tdq - per processor runqs and statistics. All fields are protected by the 193ae7a6b38SJeff Roberson * tdq_lock. The load and lowpri may be accessed without to avoid excess 194ae7a6b38SJeff Roberson * locking in sched_pickcpu(); 19535e6168fSJeff Roberson */ 196ad1e7d28SJulian Elischer struct tdq { 19762fa74d9SJeff Roberson struct cpu_group *tdq_cg; /* Pointer to cpu topology. */ 19862fa74d9SJeff Roberson struct mtx tdq_lock; /* run queue lock. */ 199e7d50326SJeff Roberson struct runq tdq_realtime; /* real-time run queue. */ 200ae7a6b38SJeff Roberson struct runq tdq_timeshare; /* timeshare run queue. */ 201ae7a6b38SJeff Roberson struct runq tdq_idle; /* Queue of IDLE threads. */ 202ae7a6b38SJeff Roberson int tdq_load; /* Aggregate load. */ 20362fa74d9SJeff Roberson int tdq_sysload; /* For loadavg, !ITHD load. */ 204ed0e8f2fSJeff Roberson u_char tdq_idx; /* Current insert index. */ 205ed0e8f2fSJeff Roberson u_char tdq_ridx; /* Current removal index. */ 206ae7a6b38SJeff Roberson u_char tdq_lowpri; /* Lowest priority thread. */ 207ae7a6b38SJeff Roberson int tdq_transferable; /* Transferable thread count. */ 20862fa74d9SJeff Roberson char tdq_name[sizeof("sched lock") + 6]; 209ae7a6b38SJeff Roberson } __aligned(64); 21035e6168fSJeff Roberson 2117b8bfa0dSJeff Roberson 21280f86c9fSJeff Roberson #ifdef SMP 21362fa74d9SJeff Roberson struct cpu_group *cpu_top; 2147b8bfa0dSJeff Roberson 21562fa74d9SJeff Roberson #define SCHED_AFFINITY_DEFAULT (max(1, hz / 1000)) 21662fa74d9SJeff Roberson #define SCHED_AFFINITY(ts, t) ((ts)->ts_rltick > ticks - ((t) * affinity)) 2177b8bfa0dSJeff Roberson 2187b8bfa0dSJeff Roberson /* 2197b8bfa0dSJeff Roberson * Run-time tunables. 2207b8bfa0dSJeff Roberson */ 22128994a58SJeff Roberson static int rebalance = 1; 2227fcf154aSJeff Roberson static int balance_interval = 128; /* Default set in sched_initticks(). */ 22328994a58SJeff Roberson static int pick_pri = 1; 2247b8bfa0dSJeff Roberson static int affinity; 2257b8bfa0dSJeff Roberson static int tryself = 1; 22662fa74d9SJeff Roberson static int oldtryself = 0; 2277fcf154aSJeff Roberson static int steal_htt = 1; 22828994a58SJeff Roberson static int steal_idle = 1; 22928994a58SJeff Roberson static int steal_thresh = 2; 23080f86c9fSJeff Roberson 23135e6168fSJeff Roberson /* 232d2ad694cSJeff Roberson * One thread queue per processor. 23335e6168fSJeff Roberson */ 234ad1e7d28SJulian Elischer static struct tdq tdq_cpu[MAXCPU]; 2357fcf154aSJeff Roberson static struct tdq *balance_tdq; 2367fcf154aSJeff Roberson static int balance_ticks; 237dc03363dSJeff Roberson 238ad1e7d28SJulian Elischer #define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)]) 239ad1e7d28SJulian Elischer #define TDQ_CPU(x) (&tdq_cpu[(x)]) 240c47f202bSJeff Roberson #define TDQ_ID(x) ((int)((x) - tdq_cpu)) 24180f86c9fSJeff Roberson #else /* !SMP */ 242ad1e7d28SJulian Elischer static struct tdq tdq_cpu; 243dc03363dSJeff Roberson 24436b36916SJeff Roberson #define TDQ_ID(x) (0) 245ad1e7d28SJulian Elischer #define TDQ_SELF() (&tdq_cpu) 246ad1e7d28SJulian Elischer #define TDQ_CPU(x) (&tdq_cpu) 2470a016a05SJeff Roberson #endif 24835e6168fSJeff Roberson 249ae7a6b38SJeff Roberson #define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type)) 250ae7a6b38SJeff Roberson #define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t))) 251ae7a6b38SJeff Roberson #define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f)) 252ae7a6b38SJeff Roberson #define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t))) 25362fa74d9SJeff Roberson #define TDQ_LOCKPTR(t) (&(t)->tdq_lock) 254ae7a6b38SJeff Roberson 2558460a577SJohn Birrell static void sched_priority(struct thread *); 25621381d1bSJeff Roberson static void sched_thread_priority(struct thread *, u_char); 2578460a577SJohn Birrell static int sched_interact_score(struct thread *); 2588460a577SJohn Birrell static void sched_interact_update(struct thread *); 2598460a577SJohn Birrell static void sched_interact_fork(struct thread *); 260ad1e7d28SJulian Elischer static void sched_pctcpu_update(struct td_sched *); 26135e6168fSJeff Roberson 2625d7ef00cSJeff Roberson /* Operations on per processor queues */ 263ad1e7d28SJulian Elischer static struct td_sched * tdq_choose(struct tdq *); 264ad1e7d28SJulian Elischer static void tdq_setup(struct tdq *); 265ad1e7d28SJulian Elischer static void tdq_load_add(struct tdq *, struct td_sched *); 266ad1e7d28SJulian Elischer static void tdq_load_rem(struct tdq *, struct td_sched *); 267ad1e7d28SJulian Elischer static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int); 268ad1e7d28SJulian Elischer static __inline void tdq_runq_rem(struct tdq *, struct td_sched *); 269ad1e7d28SJulian Elischer void tdq_print(int cpu); 270e7d50326SJeff Roberson static void runq_print(struct runq *rq); 271ae7a6b38SJeff Roberson static void tdq_add(struct tdq *, struct thread *, int); 2725d7ef00cSJeff Roberson #ifdef SMP 27362fa74d9SJeff Roberson static int tdq_move(struct tdq *, struct tdq *); 274ad1e7d28SJulian Elischer static int tdq_idled(struct tdq *); 2757b8bfa0dSJeff Roberson static void tdq_notify(struct td_sched *); 27662fa74d9SJeff Roberson static struct td_sched *tdq_steal(struct tdq *, int); 27762fa74d9SJeff Roberson static struct td_sched *runq_steal(struct runq *, int); 278ae7a6b38SJeff Roberson static int sched_pickcpu(struct td_sched *, int); 2797fcf154aSJeff Roberson static void sched_balance(void); 28062fa74d9SJeff Roberson static int sched_balance_pair(struct tdq *, struct tdq *); 281ae7a6b38SJeff Roberson static inline struct tdq *sched_setcpu(struct td_sched *, int, int); 282ae7a6b38SJeff Roberson static inline struct mtx *thread_block_switch(struct thread *); 283ae7a6b38SJeff Roberson static inline void thread_unblock_switch(struct thread *, struct mtx *); 284c47f202bSJeff Roberson static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int); 2855d7ef00cSJeff Roberson #endif 2865d7ef00cSJeff Roberson 287e7d50326SJeff Roberson static void sched_setup(void *dummy); 288e7d50326SJeff Roberson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) 289e7d50326SJeff Roberson 290e7d50326SJeff Roberson static void sched_initticks(void *dummy); 291e7d50326SJeff Roberson SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL) 292e7d50326SJeff Roberson 293ae7a6b38SJeff Roberson /* 294ae7a6b38SJeff Roberson * Print the threads waiting on a run-queue. 295ae7a6b38SJeff Roberson */ 296e7d50326SJeff Roberson static void 297e7d50326SJeff Roberson runq_print(struct runq *rq) 298e7d50326SJeff Roberson { 299e7d50326SJeff Roberson struct rqhead *rqh; 300e7d50326SJeff Roberson struct td_sched *ts; 301e7d50326SJeff Roberson int pri; 302e7d50326SJeff Roberson int j; 303e7d50326SJeff Roberson int i; 304e7d50326SJeff Roberson 305e7d50326SJeff Roberson for (i = 0; i < RQB_LEN; i++) { 306e7d50326SJeff Roberson printf("\t\trunq bits %d 0x%zx\n", 307e7d50326SJeff Roberson i, rq->rq_status.rqb_bits[i]); 308e7d50326SJeff Roberson for (j = 0; j < RQB_BPW; j++) 309e7d50326SJeff Roberson if (rq->rq_status.rqb_bits[i] & (1ul << j)) { 310e7d50326SJeff Roberson pri = j + (i << RQB_L2BPW); 311e7d50326SJeff Roberson rqh = &rq->rq_queues[pri]; 312e7d50326SJeff Roberson TAILQ_FOREACH(ts, rqh, ts_procq) { 313e7d50326SJeff Roberson printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n", 314431f8906SJulian Elischer ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri); 315e7d50326SJeff Roberson } 316e7d50326SJeff Roberson } 317e7d50326SJeff Roberson } 318e7d50326SJeff Roberson } 319e7d50326SJeff Roberson 320ae7a6b38SJeff Roberson /* 321ae7a6b38SJeff Roberson * Print the status of a per-cpu thread queue. Should be a ddb show cmd. 322ae7a6b38SJeff Roberson */ 32315dc847eSJeff Roberson void 324ad1e7d28SJulian Elischer tdq_print(int cpu) 32515dc847eSJeff Roberson { 326ad1e7d28SJulian Elischer struct tdq *tdq; 32715dc847eSJeff Roberson 328ad1e7d28SJulian Elischer tdq = TDQ_CPU(cpu); 32915dc847eSJeff Roberson 330c47f202bSJeff Roberson printf("tdq %d:\n", TDQ_ID(tdq)); 33162fa74d9SJeff Roberson printf("\tlock %p\n", TDQ_LOCKPTR(tdq)); 33262fa74d9SJeff Roberson printf("\tLock name: %s\n", tdq->tdq_name); 333d2ad694cSJeff Roberson printf("\tload: %d\n", tdq->tdq_load); 334e7d50326SJeff Roberson printf("\ttimeshare idx: %d\n", tdq->tdq_idx); 3353f872f85SJeff Roberson printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx); 336e7d50326SJeff Roberson printf("\trealtime runq:\n"); 337e7d50326SJeff Roberson runq_print(&tdq->tdq_realtime); 338e7d50326SJeff Roberson printf("\ttimeshare runq:\n"); 339e7d50326SJeff Roberson runq_print(&tdq->tdq_timeshare); 340e7d50326SJeff Roberson printf("\tidle runq:\n"); 341e7d50326SJeff Roberson runq_print(&tdq->tdq_idle); 342d2ad694cSJeff Roberson printf("\tload transferable: %d\n", tdq->tdq_transferable); 343ae7a6b38SJeff Roberson printf("\tlowest priority: %d\n", tdq->tdq_lowpri); 34415dc847eSJeff Roberson } 34515dc847eSJeff Roberson 346ae7a6b38SJeff Roberson #define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS) 347ae7a6b38SJeff Roberson /* 348ae7a6b38SJeff Roberson * Add a thread to the actual run-queue. Keeps transferable counts up to 349ae7a6b38SJeff Roberson * date with what is actually on the run-queue. Selects the correct 350ae7a6b38SJeff Roberson * queue position for timeshare threads. 351ae7a6b38SJeff Roberson */ 352155b9987SJeff Roberson static __inline void 353ad1e7d28SJulian Elischer tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags) 354155b9987SJeff Roberson { 355ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 356ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 357e7d50326SJeff Roberson if (THREAD_CAN_MIGRATE(ts->ts_thread)) { 358d2ad694cSJeff Roberson tdq->tdq_transferable++; 359ad1e7d28SJulian Elischer ts->ts_flags |= TSF_XFERABLE; 36080f86c9fSJeff Roberson } 361e7d50326SJeff Roberson if (ts->ts_runq == &tdq->tdq_timeshare) { 362ed0e8f2fSJeff Roberson u_char pri; 363e7d50326SJeff Roberson 364e7d50326SJeff Roberson pri = ts->ts_thread->td_priority; 365e7d50326SJeff Roberson KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE, 366e7d50326SJeff Roberson ("Invalid priority %d on timeshare runq", pri)); 367e7d50326SJeff Roberson /* 368e7d50326SJeff Roberson * This queue contains only priorities between MIN and MAX 369e7d50326SJeff Roberson * realtime. Use the whole queue to represent these values. 370e7d50326SJeff Roberson */ 371c47f202bSJeff Roberson if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) { 372e7d50326SJeff Roberson pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ; 373e7d50326SJeff Roberson pri = (pri + tdq->tdq_idx) % RQ_NQS; 3743f872f85SJeff Roberson /* 3753f872f85SJeff Roberson * This effectively shortens the queue by one so we 3763f872f85SJeff Roberson * can have a one slot difference between idx and 3773f872f85SJeff Roberson * ridx while we wait for threads to drain. 3783f872f85SJeff Roberson */ 3793f872f85SJeff Roberson if (tdq->tdq_ridx != tdq->tdq_idx && 3803f872f85SJeff Roberson pri == tdq->tdq_ridx) 3814499aff6SJeff Roberson pri = (unsigned char)(pri - 1) % RQ_NQS; 382e7d50326SJeff Roberson } else 3833f872f85SJeff Roberson pri = tdq->tdq_ridx; 384e7d50326SJeff Roberson runq_add_pri(ts->ts_runq, ts, pri, flags); 385e7d50326SJeff Roberson } else 386ad1e7d28SJulian Elischer runq_add(ts->ts_runq, ts, flags); 387155b9987SJeff Roberson } 388155b9987SJeff Roberson 389ae7a6b38SJeff Roberson /* 390ae7a6b38SJeff Roberson * Remove a thread from a run-queue. This typically happens when a thread 391ae7a6b38SJeff Roberson * is selected to run. Running threads are not on the queue and the 392ae7a6b38SJeff Roberson * transferable count does not reflect them. 393ae7a6b38SJeff Roberson */ 394155b9987SJeff Roberson static __inline void 395ad1e7d28SJulian Elischer tdq_runq_rem(struct tdq *tdq, struct td_sched *ts) 396155b9987SJeff Roberson { 397ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 398ae7a6b38SJeff Roberson KASSERT(ts->ts_runq != NULL, 399ae7a6b38SJeff Roberson ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread)); 400ad1e7d28SJulian Elischer if (ts->ts_flags & TSF_XFERABLE) { 401d2ad694cSJeff Roberson tdq->tdq_transferable--; 402ad1e7d28SJulian Elischer ts->ts_flags &= ~TSF_XFERABLE; 40380f86c9fSJeff Roberson } 4043f872f85SJeff Roberson if (ts->ts_runq == &tdq->tdq_timeshare) { 4053f872f85SJeff Roberson if (tdq->tdq_idx != tdq->tdq_ridx) 4063f872f85SJeff Roberson runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx); 407e7d50326SJeff Roberson else 4083f872f85SJeff Roberson runq_remove_idx(ts->ts_runq, ts, NULL); 4098ab80cf0SJeff Roberson /* 4108ab80cf0SJeff Roberson * For timeshare threads we update the priority here so 4118ab80cf0SJeff Roberson * the priority reflects the time we've been sleeping. 4128ab80cf0SJeff Roberson */ 4138ab80cf0SJeff Roberson ts->ts_ltick = ticks; 4148ab80cf0SJeff Roberson sched_pctcpu_update(ts); 4158ab80cf0SJeff Roberson sched_priority(ts->ts_thread); 4163f872f85SJeff Roberson } else 417ad1e7d28SJulian Elischer runq_remove(ts->ts_runq, ts); 418155b9987SJeff Roberson } 419155b9987SJeff Roberson 420ae7a6b38SJeff Roberson /* 421ae7a6b38SJeff Roberson * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load 422ae7a6b38SJeff Roberson * for this thread to the referenced thread queue. 423ae7a6b38SJeff Roberson */ 424a8949de2SJeff Roberson static void 425ad1e7d28SJulian Elischer tdq_load_add(struct tdq *tdq, struct td_sched *ts) 4265d7ef00cSJeff Roberson { 427ef1134c9SJeff Roberson int class; 428ae7a6b38SJeff Roberson 429ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 430ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 431ad1e7d28SJulian Elischer class = PRI_BASE(ts->ts_thread->td_pri_class); 432d2ad694cSJeff Roberson tdq->tdq_load++; 433c47f202bSJeff Roberson CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load); 4347b8bfa0dSJeff Roberson if (class != PRI_ITHD && 4357b8bfa0dSJeff Roberson (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 436d2ad694cSJeff Roberson tdq->tdq_sysload++; 4375d7ef00cSJeff Roberson } 43815dc847eSJeff Roberson 439ae7a6b38SJeff Roberson /* 440ae7a6b38SJeff Roberson * Remove the load from a thread that is transitioning to a sleep state or 441ae7a6b38SJeff Roberson * exiting. 442ae7a6b38SJeff Roberson */ 443a8949de2SJeff Roberson static void 444ad1e7d28SJulian Elischer tdq_load_rem(struct tdq *tdq, struct td_sched *ts) 4455d7ef00cSJeff Roberson { 446ef1134c9SJeff Roberson int class; 447ae7a6b38SJeff Roberson 448ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 449ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 450ad1e7d28SJulian Elischer class = PRI_BASE(ts->ts_thread->td_pri_class); 4517b8bfa0dSJeff Roberson if (class != PRI_ITHD && 4527b8bfa0dSJeff Roberson (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0) 453d2ad694cSJeff Roberson tdq->tdq_sysload--; 454ae7a6b38SJeff Roberson KASSERT(tdq->tdq_load != 0, 455c47f202bSJeff Roberson ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq))); 456d2ad694cSJeff Roberson tdq->tdq_load--; 457d2ad694cSJeff Roberson CTR1(KTR_SCHED, "load: %d", tdq->tdq_load); 458ad1e7d28SJulian Elischer ts->ts_runq = NULL; 45915dc847eSJeff Roberson } 46015dc847eSJeff Roberson 461356500a3SJeff Roberson /* 46262fa74d9SJeff Roberson * Set lowpri to its exact value by searching the run-queue and 46362fa74d9SJeff Roberson * evaluating curthread. curthread may be passed as an optimization. 464356500a3SJeff Roberson */ 46522bf7d9aSJeff Roberson static void 46662fa74d9SJeff Roberson tdq_setlowpri(struct tdq *tdq, struct thread *ctd) 46762fa74d9SJeff Roberson { 46862fa74d9SJeff Roberson struct td_sched *ts; 46962fa74d9SJeff Roberson struct thread *td; 47062fa74d9SJeff Roberson 47162fa74d9SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 47262fa74d9SJeff Roberson if (ctd == NULL) 47362fa74d9SJeff Roberson ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread; 47462fa74d9SJeff Roberson ts = tdq_choose(tdq); 47562fa74d9SJeff Roberson if (ts) 47662fa74d9SJeff Roberson td = ts->ts_thread; 47762fa74d9SJeff Roberson if (ts == NULL || td->td_priority > ctd->td_priority) 47862fa74d9SJeff Roberson tdq->tdq_lowpri = ctd->td_priority; 47962fa74d9SJeff Roberson else 48062fa74d9SJeff Roberson tdq->tdq_lowpri = td->td_priority; 48162fa74d9SJeff Roberson } 48262fa74d9SJeff Roberson 48362fa74d9SJeff Roberson #ifdef SMP 48462fa74d9SJeff Roberson struct cpu_search { 48562fa74d9SJeff Roberson cpumask_t cs_mask; /* Mask of valid cpus. */ 48662fa74d9SJeff Roberson u_int cs_load; 48762fa74d9SJeff Roberson u_int cs_cpu; 48862fa74d9SJeff Roberson int cs_limit; /* Min priority for low min load for high. */ 48962fa74d9SJeff Roberson }; 49062fa74d9SJeff Roberson 49162fa74d9SJeff Roberson #define CPU_SEARCH_LOWEST 0x1 49262fa74d9SJeff Roberson #define CPU_SEARCH_HIGHEST 0x2 49362fa74d9SJeff Roberson #define CPU_SEARCH_BOTH (CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST) 49462fa74d9SJeff Roberson 49562fa74d9SJeff Roberson #define CPUMASK_FOREACH(cpu, mask) \ 49662fa74d9SJeff Roberson for ((cpu) = 0; (cpu) < sizeof((mask)) * 8; (cpu)++) \ 49762fa74d9SJeff Roberson if ((mask) & 1 << (cpu)) 49862fa74d9SJeff Roberson 49962fa74d9SJeff Roberson __inline int cpu_search(struct cpu_group *cg, struct cpu_search *low, 50062fa74d9SJeff Roberson struct cpu_search *high, const int match); 50162fa74d9SJeff Roberson int cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low); 50262fa74d9SJeff Roberson int cpu_search_highest(struct cpu_group *cg, struct cpu_search *high); 50362fa74d9SJeff Roberson int cpu_search_both(struct cpu_group *cg, struct cpu_search *low, 50462fa74d9SJeff Roberson struct cpu_search *high); 50562fa74d9SJeff Roberson 50662fa74d9SJeff Roberson /* 50762fa74d9SJeff Roberson * This routine compares according to the match argument and should be 50862fa74d9SJeff Roberson * reduced in actual instantiations via constant propagation and dead code 50962fa74d9SJeff Roberson * elimination. 51062fa74d9SJeff Roberson */ 51162fa74d9SJeff Roberson static __inline int 51262fa74d9SJeff Roberson cpu_compare(int cpu, struct cpu_search *low, struct cpu_search *high, 51362fa74d9SJeff Roberson const int match) 51462fa74d9SJeff Roberson { 51562fa74d9SJeff Roberson struct tdq *tdq; 51662fa74d9SJeff Roberson 51762fa74d9SJeff Roberson tdq = TDQ_CPU(cpu); 51862fa74d9SJeff Roberson if (match & CPU_SEARCH_LOWEST) 51962fa74d9SJeff Roberson if (low->cs_mask & (1 << cpu) && 52062fa74d9SJeff Roberson tdq->tdq_load < low->cs_load && 52162fa74d9SJeff Roberson tdq->tdq_lowpri > low->cs_limit) { 52262fa74d9SJeff Roberson low->cs_cpu = cpu; 52362fa74d9SJeff Roberson low->cs_load = tdq->tdq_load; 52462fa74d9SJeff Roberson } 52562fa74d9SJeff Roberson if (match & CPU_SEARCH_HIGHEST) 52662fa74d9SJeff Roberson if (high->cs_mask & (1 << cpu) && 52762fa74d9SJeff Roberson tdq->tdq_load >= high->cs_limit && 52862fa74d9SJeff Roberson tdq->tdq_load > high->cs_load && 52962fa74d9SJeff Roberson tdq->tdq_transferable) { 53062fa74d9SJeff Roberson high->cs_cpu = cpu; 53162fa74d9SJeff Roberson high->cs_load = tdq->tdq_load; 53262fa74d9SJeff Roberson } 53362fa74d9SJeff Roberson return (tdq->tdq_load); 53462fa74d9SJeff Roberson } 53562fa74d9SJeff Roberson 53662fa74d9SJeff Roberson /* 53762fa74d9SJeff Roberson * Search the tree of cpu_groups for the lowest or highest loaded cpu 53862fa74d9SJeff Roberson * according to the match argument. This routine actually compares the 53962fa74d9SJeff Roberson * load on all paths through the tree and finds the least loaded cpu on 54062fa74d9SJeff Roberson * the least loaded path, which may differ from the least loaded cpu in 54162fa74d9SJeff Roberson * the system. This balances work among caches and busses. 54262fa74d9SJeff Roberson * 54362fa74d9SJeff Roberson * This inline is instantiated in three forms below using constants for the 54462fa74d9SJeff Roberson * match argument. It is reduced to the minimum set for each case. It is 54562fa74d9SJeff Roberson * also recursive to the depth of the tree. 54662fa74d9SJeff Roberson */ 54762fa74d9SJeff Roberson static inline int 54862fa74d9SJeff Roberson cpu_search(struct cpu_group *cg, struct cpu_search *low, 54962fa74d9SJeff Roberson struct cpu_search *high, const int match) 55062fa74d9SJeff Roberson { 55162fa74d9SJeff Roberson int total; 55262fa74d9SJeff Roberson 55362fa74d9SJeff Roberson total = 0; 55462fa74d9SJeff Roberson if (cg->cg_children) { 55562fa74d9SJeff Roberson struct cpu_search lgroup; 55662fa74d9SJeff Roberson struct cpu_search hgroup; 55762fa74d9SJeff Roberson struct cpu_group *child; 55862fa74d9SJeff Roberson u_int lload; 55962fa74d9SJeff Roberson int hload; 56062fa74d9SJeff Roberson int load; 56162fa74d9SJeff Roberson int i; 56262fa74d9SJeff Roberson 56362fa74d9SJeff Roberson lload = -1; 56462fa74d9SJeff Roberson hload = -1; 56562fa74d9SJeff Roberson for (i = 0; i < cg->cg_children; i++) { 56662fa74d9SJeff Roberson child = &cg->cg_child[i]; 56762fa74d9SJeff Roberson if (match & CPU_SEARCH_LOWEST) { 56862fa74d9SJeff Roberson lgroup = *low; 56962fa74d9SJeff Roberson lgroup.cs_load = -1; 57062fa74d9SJeff Roberson } 57162fa74d9SJeff Roberson if (match & CPU_SEARCH_HIGHEST) { 57262fa74d9SJeff Roberson hgroup = *high; 57362fa74d9SJeff Roberson lgroup.cs_load = 0; 57462fa74d9SJeff Roberson } 57562fa74d9SJeff Roberson switch (match) { 57662fa74d9SJeff Roberson case CPU_SEARCH_LOWEST: 57762fa74d9SJeff Roberson load = cpu_search_lowest(child, &lgroup); 57862fa74d9SJeff Roberson break; 57962fa74d9SJeff Roberson case CPU_SEARCH_HIGHEST: 58062fa74d9SJeff Roberson load = cpu_search_highest(child, &hgroup); 58162fa74d9SJeff Roberson break; 58262fa74d9SJeff Roberson case CPU_SEARCH_BOTH: 58362fa74d9SJeff Roberson load = cpu_search_both(child, &lgroup, &hgroup); 58462fa74d9SJeff Roberson break; 58562fa74d9SJeff Roberson } 58662fa74d9SJeff Roberson total += load; 58762fa74d9SJeff Roberson if (match & CPU_SEARCH_LOWEST) 58862fa74d9SJeff Roberson if (load < lload || low->cs_cpu == -1) { 58962fa74d9SJeff Roberson *low = lgroup; 59062fa74d9SJeff Roberson lload = load; 59162fa74d9SJeff Roberson } 59262fa74d9SJeff Roberson if (match & CPU_SEARCH_HIGHEST) 59362fa74d9SJeff Roberson if (load > hload || high->cs_cpu == -1) { 59462fa74d9SJeff Roberson hload = load; 59562fa74d9SJeff Roberson *high = hgroup; 59662fa74d9SJeff Roberson } 59762fa74d9SJeff Roberson } 59862fa74d9SJeff Roberson } else { 59962fa74d9SJeff Roberson int cpu; 60062fa74d9SJeff Roberson 60162fa74d9SJeff Roberson CPUMASK_FOREACH(cpu, cg->cg_mask) 60262fa74d9SJeff Roberson total += cpu_compare(cpu, low, high, match); 60362fa74d9SJeff Roberson } 60462fa74d9SJeff Roberson return (total); 60562fa74d9SJeff Roberson } 60662fa74d9SJeff Roberson 60762fa74d9SJeff Roberson /* 60862fa74d9SJeff Roberson * cpu_search instantiations must pass constants to maintain the inline 60962fa74d9SJeff Roberson * optimization. 61062fa74d9SJeff Roberson */ 61162fa74d9SJeff Roberson int 61262fa74d9SJeff Roberson cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low) 61362fa74d9SJeff Roberson { 61462fa74d9SJeff Roberson return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST); 61562fa74d9SJeff Roberson } 61662fa74d9SJeff Roberson 61762fa74d9SJeff Roberson int 61862fa74d9SJeff Roberson cpu_search_highest(struct cpu_group *cg, struct cpu_search *high) 61962fa74d9SJeff Roberson { 62062fa74d9SJeff Roberson return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST); 62162fa74d9SJeff Roberson } 62262fa74d9SJeff Roberson 62362fa74d9SJeff Roberson int 62462fa74d9SJeff Roberson cpu_search_both(struct cpu_group *cg, struct cpu_search *low, 62562fa74d9SJeff Roberson struct cpu_search *high) 62662fa74d9SJeff Roberson { 62762fa74d9SJeff Roberson return cpu_search(cg, low, high, CPU_SEARCH_BOTH); 62862fa74d9SJeff Roberson } 62962fa74d9SJeff Roberson 63062fa74d9SJeff Roberson /* 63162fa74d9SJeff Roberson * Find the cpu with the least load via the least loaded path that has a 63262fa74d9SJeff Roberson * lowpri greater than pri pri. A pri of -1 indicates any priority is 63362fa74d9SJeff Roberson * acceptable. 63462fa74d9SJeff Roberson */ 63562fa74d9SJeff Roberson static inline int 63662fa74d9SJeff Roberson sched_lowest(struct cpu_group *cg, cpumask_t mask, int pri) 63762fa74d9SJeff Roberson { 63862fa74d9SJeff Roberson struct cpu_search low; 63962fa74d9SJeff Roberson 64062fa74d9SJeff Roberson low.cs_cpu = -1; 64162fa74d9SJeff Roberson low.cs_load = -1; 64262fa74d9SJeff Roberson low.cs_mask = mask; 64362fa74d9SJeff Roberson low.cs_limit = pri; 64462fa74d9SJeff Roberson cpu_search_lowest(cg, &low); 64562fa74d9SJeff Roberson return low.cs_cpu; 64662fa74d9SJeff Roberson } 64762fa74d9SJeff Roberson 64862fa74d9SJeff Roberson /* 64962fa74d9SJeff Roberson * Find the cpu with the highest load via the highest loaded path. 65062fa74d9SJeff Roberson */ 65162fa74d9SJeff Roberson static inline int 65262fa74d9SJeff Roberson sched_highest(struct cpu_group *cg, cpumask_t mask, int minload) 65362fa74d9SJeff Roberson { 65462fa74d9SJeff Roberson struct cpu_search high; 65562fa74d9SJeff Roberson 65662fa74d9SJeff Roberson high.cs_cpu = -1; 65762fa74d9SJeff Roberson high.cs_load = 0; 65862fa74d9SJeff Roberson high.cs_mask = mask; 65962fa74d9SJeff Roberson high.cs_limit = minload; 66062fa74d9SJeff Roberson cpu_search_highest(cg, &high); 66162fa74d9SJeff Roberson return high.cs_cpu; 66262fa74d9SJeff Roberson } 66362fa74d9SJeff Roberson 66462fa74d9SJeff Roberson /* 66562fa74d9SJeff Roberson * Simultaneously find the highest and lowest loaded cpu reachable via 66662fa74d9SJeff Roberson * cg. 66762fa74d9SJeff Roberson */ 66862fa74d9SJeff Roberson static inline void 66962fa74d9SJeff Roberson sched_both(struct cpu_group *cg, cpumask_t mask, int *lowcpu, int *highcpu) 67062fa74d9SJeff Roberson { 67162fa74d9SJeff Roberson struct cpu_search high; 67262fa74d9SJeff Roberson struct cpu_search low; 67362fa74d9SJeff Roberson 67462fa74d9SJeff Roberson low.cs_cpu = -1; 67562fa74d9SJeff Roberson low.cs_limit = -1; 67662fa74d9SJeff Roberson low.cs_load = -1; 67762fa74d9SJeff Roberson low.cs_mask = mask; 67862fa74d9SJeff Roberson high.cs_load = 0; 67962fa74d9SJeff Roberson high.cs_cpu = -1; 68062fa74d9SJeff Roberson high.cs_limit = -1; 68162fa74d9SJeff Roberson high.cs_mask = mask; 68262fa74d9SJeff Roberson cpu_search_both(cg, &low, &high); 68362fa74d9SJeff Roberson *lowcpu = low.cs_cpu; 68462fa74d9SJeff Roberson *highcpu = high.cs_cpu; 68562fa74d9SJeff Roberson return; 68662fa74d9SJeff Roberson } 68762fa74d9SJeff Roberson 68862fa74d9SJeff Roberson static void 68962fa74d9SJeff Roberson sched_balance_group(struct cpu_group *cg) 69062fa74d9SJeff Roberson { 69162fa74d9SJeff Roberson cpumask_t mask; 69262fa74d9SJeff Roberson int high; 69362fa74d9SJeff Roberson int low; 69462fa74d9SJeff Roberson int i; 69562fa74d9SJeff Roberson 69662fa74d9SJeff Roberson mask = -1; 69762fa74d9SJeff Roberson for (;;) { 69862fa74d9SJeff Roberson sched_both(cg, mask, &low, &high); 69962fa74d9SJeff Roberson if (low == high || low == -1 || high == -1) 70062fa74d9SJeff Roberson break; 70162fa74d9SJeff Roberson if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low))) 70262fa74d9SJeff Roberson break; 70362fa74d9SJeff Roberson /* 70462fa74d9SJeff Roberson * If we failed to move any threads determine which cpu 70562fa74d9SJeff Roberson * to kick out of the set and try again. 70662fa74d9SJeff Roberson */ 70762fa74d9SJeff Roberson if (TDQ_CPU(high)->tdq_transferable == 0) 70862fa74d9SJeff Roberson mask &= ~(1 << high); 70962fa74d9SJeff Roberson else 71062fa74d9SJeff Roberson mask &= ~(1 << low); 71162fa74d9SJeff Roberson } 71262fa74d9SJeff Roberson 71362fa74d9SJeff Roberson for (i = 0; i < cg->cg_children; i++) 71462fa74d9SJeff Roberson sched_balance_group(&cg->cg_child[i]); 71562fa74d9SJeff Roberson } 71662fa74d9SJeff Roberson 71762fa74d9SJeff Roberson static void 7187fcf154aSJeff Roberson sched_balance() 719356500a3SJeff Roberson { 7207fcf154aSJeff Roberson struct tdq *tdq; 721356500a3SJeff Roberson 7227fcf154aSJeff Roberson /* 7237fcf154aSJeff Roberson * Select a random time between .5 * balance_interval and 7247fcf154aSJeff Roberson * 1.5 * balance_interval. 7257fcf154aSJeff Roberson */ 7267fcf154aSJeff Roberson balance_ticks = max(balance_interval / 2, 1); 7277fcf154aSJeff Roberson balance_ticks += random() % balance_interval; 728ae7a6b38SJeff Roberson if (smp_started == 0 || rebalance == 0) 729598b368dSJeff Roberson return; 7307fcf154aSJeff Roberson tdq = TDQ_SELF(); 7317fcf154aSJeff Roberson TDQ_UNLOCK(tdq); 73262fa74d9SJeff Roberson sched_balance_group(cpu_top); 7337fcf154aSJeff Roberson TDQ_LOCK(tdq); 734cac77d04SJeff Roberson } 73586f8ae96SJeff Roberson 736ae7a6b38SJeff Roberson /* 737ae7a6b38SJeff Roberson * Lock two thread queues using their address to maintain lock order. 738ae7a6b38SJeff Roberson */ 739ae7a6b38SJeff Roberson static void 740ae7a6b38SJeff Roberson tdq_lock_pair(struct tdq *one, struct tdq *two) 741ae7a6b38SJeff Roberson { 742ae7a6b38SJeff Roberson if (one < two) { 743ae7a6b38SJeff Roberson TDQ_LOCK(one); 744ae7a6b38SJeff Roberson TDQ_LOCK_FLAGS(two, MTX_DUPOK); 745ae7a6b38SJeff Roberson } else { 746ae7a6b38SJeff Roberson TDQ_LOCK(two); 747ae7a6b38SJeff Roberson TDQ_LOCK_FLAGS(one, MTX_DUPOK); 748ae7a6b38SJeff Roberson } 749ae7a6b38SJeff Roberson } 750ae7a6b38SJeff Roberson 751ae7a6b38SJeff Roberson /* 7527fcf154aSJeff Roberson * Unlock two thread queues. Order is not important here. 7537fcf154aSJeff Roberson */ 7547fcf154aSJeff Roberson static void 7557fcf154aSJeff Roberson tdq_unlock_pair(struct tdq *one, struct tdq *two) 7567fcf154aSJeff Roberson { 7577fcf154aSJeff Roberson TDQ_UNLOCK(one); 7587fcf154aSJeff Roberson TDQ_UNLOCK(two); 7597fcf154aSJeff Roberson } 7607fcf154aSJeff Roberson 7617fcf154aSJeff Roberson /* 762ae7a6b38SJeff Roberson * Transfer load between two imbalanced thread queues. 763ae7a6b38SJeff Roberson */ 76462fa74d9SJeff Roberson static int 765ad1e7d28SJulian Elischer sched_balance_pair(struct tdq *high, struct tdq *low) 766cac77d04SJeff Roberson { 767cac77d04SJeff Roberson int transferable; 768cac77d04SJeff Roberson int high_load; 769cac77d04SJeff Roberson int low_load; 77062fa74d9SJeff Roberson int moved; 771cac77d04SJeff Roberson int move; 772cac77d04SJeff Roberson int diff; 773cac77d04SJeff Roberson int i; 774cac77d04SJeff Roberson 775ae7a6b38SJeff Roberson tdq_lock_pair(high, low); 776d2ad694cSJeff Roberson transferable = high->tdq_transferable; 777d2ad694cSJeff Roberson high_load = high->tdq_load; 778d2ad694cSJeff Roberson low_load = low->tdq_load; 77962fa74d9SJeff Roberson moved = 0; 780155b9987SJeff Roberson /* 781155b9987SJeff Roberson * Determine what the imbalance is and then adjust that to how many 782d2ad694cSJeff Roberson * threads we actually have to give up (transferable). 783155b9987SJeff Roberson */ 784ae7a6b38SJeff Roberson if (transferable != 0) { 785cac77d04SJeff Roberson diff = high_load - low_load; 786356500a3SJeff Roberson move = diff / 2; 787356500a3SJeff Roberson if (diff & 0x1) 788356500a3SJeff Roberson move++; 78980f86c9fSJeff Roberson move = min(move, transferable); 790356500a3SJeff Roberson for (i = 0; i < move; i++) 79162fa74d9SJeff Roberson moved += tdq_move(high, low); 792a5423ea3SJeff Roberson /* 793a5423ea3SJeff Roberson * IPI the target cpu to force it to reschedule with the new 794a5423ea3SJeff Roberson * workload. 795a5423ea3SJeff Roberson */ 796a5423ea3SJeff Roberson ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT); 797ae7a6b38SJeff Roberson } 7987fcf154aSJeff Roberson tdq_unlock_pair(high, low); 79962fa74d9SJeff Roberson return (moved); 800356500a3SJeff Roberson } 801356500a3SJeff Roberson 802ae7a6b38SJeff Roberson /* 803ae7a6b38SJeff Roberson * Move a thread from one thread queue to another. 804ae7a6b38SJeff Roberson */ 80562fa74d9SJeff Roberson static int 806ae7a6b38SJeff Roberson tdq_move(struct tdq *from, struct tdq *to) 807356500a3SJeff Roberson { 808ad1e7d28SJulian Elischer struct td_sched *ts; 809ae7a6b38SJeff Roberson struct thread *td; 810ae7a6b38SJeff Roberson struct tdq *tdq; 811ae7a6b38SJeff Roberson int cpu; 812356500a3SJeff Roberson 8137fcf154aSJeff Roberson TDQ_LOCK_ASSERT(from, MA_OWNED); 8147fcf154aSJeff Roberson TDQ_LOCK_ASSERT(to, MA_OWNED); 8157fcf154aSJeff Roberson 816ad1e7d28SJulian Elischer tdq = from; 817ae7a6b38SJeff Roberson cpu = TDQ_ID(to); 81862fa74d9SJeff Roberson ts = tdq_steal(tdq, cpu); 819ad1e7d28SJulian Elischer if (ts == NULL) 82062fa74d9SJeff Roberson return (0); 821ae7a6b38SJeff Roberson td = ts->ts_thread; 822ae7a6b38SJeff Roberson /* 823ae7a6b38SJeff Roberson * Although the run queue is locked the thread may be blocked. Lock 8247fcf154aSJeff Roberson * it to clear this and acquire the run-queue lock. 825ae7a6b38SJeff Roberson */ 826ae7a6b38SJeff Roberson thread_lock(td); 8277fcf154aSJeff Roberson /* Drop recursive lock on from acquired via thread_lock(). */ 828ae7a6b38SJeff Roberson TDQ_UNLOCK(from); 829ae7a6b38SJeff Roberson sched_rem(td); 8307b8bfa0dSJeff Roberson ts->ts_cpu = cpu; 831ae7a6b38SJeff Roberson td->td_lock = TDQ_LOCKPTR(to); 832ae7a6b38SJeff Roberson tdq_add(to, td, SRQ_YIELDING); 83362fa74d9SJeff Roberson return (1); 834356500a3SJeff Roberson } 83522bf7d9aSJeff Roberson 836ae7a6b38SJeff Roberson /* 837ae7a6b38SJeff Roberson * This tdq has idled. Try to steal a thread from another cpu and switch 838ae7a6b38SJeff Roberson * to it. 839ae7a6b38SJeff Roberson */ 84080f86c9fSJeff Roberson static int 841ad1e7d28SJulian Elischer tdq_idled(struct tdq *tdq) 84222bf7d9aSJeff Roberson { 84362fa74d9SJeff Roberson struct cpu_group *cg; 844ad1e7d28SJulian Elischer struct tdq *steal; 84562fa74d9SJeff Roberson cpumask_t mask; 84662fa74d9SJeff Roberson int thresh; 847ae7a6b38SJeff Roberson int cpu; 84880f86c9fSJeff Roberson 84988f530ccSJeff Roberson if (smp_started == 0 || steal_idle == 0) 85088f530ccSJeff Roberson return (1); 85162fa74d9SJeff Roberson mask = -1; 85262fa74d9SJeff Roberson mask &= ~PCPU_GET(cpumask); 85362fa74d9SJeff Roberson /* We don't want to be preempted while we're iterating. */ 854ae7a6b38SJeff Roberson spinlock_enter(); 85562fa74d9SJeff Roberson for (cg = tdq->tdq_cg; cg != NULL; ) { 85662fa74d9SJeff Roberson if ((cg->cg_flags & (CG_FLAG_HTT | CG_FLAG_THREAD)) == 0) 85762fa74d9SJeff Roberson thresh = steal_thresh; 85862fa74d9SJeff Roberson else 85962fa74d9SJeff Roberson thresh = 1; 86062fa74d9SJeff Roberson cpu = sched_highest(cg, mask, thresh); 86162fa74d9SJeff Roberson if (cpu == -1) { 86262fa74d9SJeff Roberson cg = cg->cg_parent; 86380f86c9fSJeff Roberson continue; 8647b8bfa0dSJeff Roberson } 8657b8bfa0dSJeff Roberson steal = TDQ_CPU(cpu); 86662fa74d9SJeff Roberson mask &= ~(1 << cpu); 8677fcf154aSJeff Roberson tdq_lock_pair(tdq, steal); 86862fa74d9SJeff Roberson if (steal->tdq_load < thresh || steal->tdq_transferable == 0) { 8697fcf154aSJeff Roberson tdq_unlock_pair(tdq, steal); 87062fa74d9SJeff Roberson continue; 87162fa74d9SJeff Roberson } 87262fa74d9SJeff Roberson /* 87362fa74d9SJeff Roberson * If a thread was added while interrupts were disabled don't 87462fa74d9SJeff Roberson * steal one here. If we fail to acquire one due to affinity 87562fa74d9SJeff Roberson * restrictions loop again with this cpu removed from the 87662fa74d9SJeff Roberson * set. 87762fa74d9SJeff Roberson */ 87862fa74d9SJeff Roberson if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) { 87962fa74d9SJeff Roberson tdq_unlock_pair(tdq, steal); 88062fa74d9SJeff Roberson continue; 88180f86c9fSJeff Roberson } 882ae7a6b38SJeff Roberson spinlock_exit(); 883ae7a6b38SJeff Roberson TDQ_UNLOCK(steal); 884ae7a6b38SJeff Roberson mi_switch(SW_VOL, NULL); 885ae7a6b38SJeff Roberson thread_unlock(curthread); 8867b8bfa0dSJeff Roberson 8877b8bfa0dSJeff Roberson return (0); 88822bf7d9aSJeff Roberson } 88962fa74d9SJeff Roberson spinlock_exit(); 89062fa74d9SJeff Roberson return (1); 89162fa74d9SJeff Roberson } 89222bf7d9aSJeff Roberson 893ae7a6b38SJeff Roberson /* 894ae7a6b38SJeff Roberson * Notify a remote cpu of new work. Sends an IPI if criteria are met. 895ae7a6b38SJeff Roberson */ 89622bf7d9aSJeff Roberson static void 8977b8bfa0dSJeff Roberson tdq_notify(struct td_sched *ts) 89822bf7d9aSJeff Roberson { 899fc3a97dcSJeff Roberson struct thread *ctd; 90022bf7d9aSJeff Roberson struct pcpu *pcpu; 901fc3a97dcSJeff Roberson int cpri; 902fc3a97dcSJeff Roberson int pri; 9037b8bfa0dSJeff Roberson int cpu; 90422bf7d9aSJeff Roberson 9057b8bfa0dSJeff Roberson cpu = ts->ts_cpu; 906fc3a97dcSJeff Roberson pri = ts->ts_thread->td_priority; 90722bf7d9aSJeff Roberson pcpu = pcpu_find(cpu); 908fc3a97dcSJeff Roberson ctd = pcpu->pc_curthread; 909fc3a97dcSJeff Roberson cpri = ctd->td_priority; 9106b2f763fSJeff Roberson 9116b2f763fSJeff Roberson /* 9126b2f763fSJeff Roberson * If our priority is not better than the current priority there is 9136b2f763fSJeff Roberson * nothing to do. 9146b2f763fSJeff Roberson */ 915fc3a97dcSJeff Roberson if (pri > cpri) 9166b2f763fSJeff Roberson return; 9177b8bfa0dSJeff Roberson /* 918fc3a97dcSJeff Roberson * Always IPI idle. 9197b8bfa0dSJeff Roberson */ 920fc3a97dcSJeff Roberson if (cpri > PRI_MIN_IDLE) 921fc3a97dcSJeff Roberson goto sendipi; 922fc3a97dcSJeff Roberson /* 923fc3a97dcSJeff Roberson * If we're realtime or better and there is timeshare or worse running 924fc3a97dcSJeff Roberson * send an IPI. 925fc3a97dcSJeff Roberson */ 926fc3a97dcSJeff Roberson if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME) 927fc3a97dcSJeff Roberson goto sendipi; 928fc3a97dcSJeff Roberson /* 929fc3a97dcSJeff Roberson * Otherwise only IPI if we exceed the threshold. 930fc3a97dcSJeff Roberson */ 931ae7a6b38SJeff Roberson if (pri > preempt_thresh) 9327b8bfa0dSJeff Roberson return; 933fc3a97dcSJeff Roberson sendipi: 934fc3a97dcSJeff Roberson ctd->td_flags |= TDF_NEEDRESCHED; 93514618990SJeff Roberson ipi_selected(1 << cpu, IPI_PREEMPT); 93622bf7d9aSJeff Roberson } 93722bf7d9aSJeff Roberson 938ae7a6b38SJeff Roberson /* 939ae7a6b38SJeff Roberson * Steals load from a timeshare queue. Honors the rotating queue head 940ae7a6b38SJeff Roberson * index. 941ae7a6b38SJeff Roberson */ 942ae7a6b38SJeff Roberson static struct td_sched * 94362fa74d9SJeff Roberson runq_steal_from(struct runq *rq, int cpu, u_char start) 944ae7a6b38SJeff Roberson { 945ae7a6b38SJeff Roberson struct td_sched *ts; 946ae7a6b38SJeff Roberson struct rqbits *rqb; 947ae7a6b38SJeff Roberson struct rqhead *rqh; 948ae7a6b38SJeff Roberson int first; 949ae7a6b38SJeff Roberson int bit; 950ae7a6b38SJeff Roberson int pri; 951ae7a6b38SJeff Roberson int i; 952ae7a6b38SJeff Roberson 953ae7a6b38SJeff Roberson rqb = &rq->rq_status; 954ae7a6b38SJeff Roberson bit = start & (RQB_BPW -1); 955ae7a6b38SJeff Roberson pri = 0; 956ae7a6b38SJeff Roberson first = 0; 957ae7a6b38SJeff Roberson again: 958ae7a6b38SJeff Roberson for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) { 959ae7a6b38SJeff Roberson if (rqb->rqb_bits[i] == 0) 960ae7a6b38SJeff Roberson continue; 961ae7a6b38SJeff Roberson if (bit != 0) { 962ae7a6b38SJeff Roberson for (pri = bit; pri < RQB_BPW; pri++) 963ae7a6b38SJeff Roberson if (rqb->rqb_bits[i] & (1ul << pri)) 964ae7a6b38SJeff Roberson break; 965ae7a6b38SJeff Roberson if (pri >= RQB_BPW) 966ae7a6b38SJeff Roberson continue; 967ae7a6b38SJeff Roberson } else 968ae7a6b38SJeff Roberson pri = RQB_FFS(rqb->rqb_bits[i]); 969ae7a6b38SJeff Roberson pri += (i << RQB_L2BPW); 970ae7a6b38SJeff Roberson rqh = &rq->rq_queues[pri]; 971ae7a6b38SJeff Roberson TAILQ_FOREACH(ts, rqh, ts_procq) { 97262fa74d9SJeff Roberson if (first && THREAD_CAN_MIGRATE(ts->ts_thread) && 97362fa74d9SJeff Roberson THREAD_CAN_SCHED(ts->ts_thread, cpu)) 974ae7a6b38SJeff Roberson return (ts); 975ae7a6b38SJeff Roberson first = 1; 976ae7a6b38SJeff Roberson } 977ae7a6b38SJeff Roberson } 978ae7a6b38SJeff Roberson if (start != 0) { 979ae7a6b38SJeff Roberson start = 0; 980ae7a6b38SJeff Roberson goto again; 981ae7a6b38SJeff Roberson } 982ae7a6b38SJeff Roberson 983ae7a6b38SJeff Roberson return (NULL); 984ae7a6b38SJeff Roberson } 985ae7a6b38SJeff Roberson 986ae7a6b38SJeff Roberson /* 987ae7a6b38SJeff Roberson * Steals load from a standard linear queue. 988ae7a6b38SJeff Roberson */ 989ad1e7d28SJulian Elischer static struct td_sched * 99062fa74d9SJeff Roberson runq_steal(struct runq *rq, int cpu) 99122bf7d9aSJeff Roberson { 99222bf7d9aSJeff Roberson struct rqhead *rqh; 99322bf7d9aSJeff Roberson struct rqbits *rqb; 994ad1e7d28SJulian Elischer struct td_sched *ts; 99522bf7d9aSJeff Roberson int word; 99622bf7d9aSJeff Roberson int bit; 99722bf7d9aSJeff Roberson 99822bf7d9aSJeff Roberson rqb = &rq->rq_status; 99922bf7d9aSJeff Roberson for (word = 0; word < RQB_LEN; word++) { 100022bf7d9aSJeff Roberson if (rqb->rqb_bits[word] == 0) 100122bf7d9aSJeff Roberson continue; 100222bf7d9aSJeff Roberson for (bit = 0; bit < RQB_BPW; bit++) { 1003a2640c9bSPeter Wemm if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 100422bf7d9aSJeff Roberson continue; 100522bf7d9aSJeff Roberson rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 100628994a58SJeff Roberson TAILQ_FOREACH(ts, rqh, ts_procq) 100762fa74d9SJeff Roberson if (THREAD_CAN_MIGRATE(ts->ts_thread) && 100862fa74d9SJeff Roberson THREAD_CAN_SCHED(ts->ts_thread, cpu)) 1009ad1e7d28SJulian Elischer return (ts); 101022bf7d9aSJeff Roberson } 101122bf7d9aSJeff Roberson } 101222bf7d9aSJeff Roberson return (NULL); 101322bf7d9aSJeff Roberson } 101422bf7d9aSJeff Roberson 1015ae7a6b38SJeff Roberson /* 1016ae7a6b38SJeff Roberson * Attempt to steal a thread in priority order from a thread queue. 1017ae7a6b38SJeff Roberson */ 1018ad1e7d28SJulian Elischer static struct td_sched * 101962fa74d9SJeff Roberson tdq_steal(struct tdq *tdq, int cpu) 102022bf7d9aSJeff Roberson { 1021ad1e7d28SJulian Elischer struct td_sched *ts; 102222bf7d9aSJeff Roberson 1023ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 102462fa74d9SJeff Roberson if ((ts = runq_steal(&tdq->tdq_realtime, cpu)) != NULL) 1025ad1e7d28SJulian Elischer return (ts); 102662fa74d9SJeff Roberson if ((ts = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx)) 102762fa74d9SJeff Roberson != NULL) 1028ad1e7d28SJulian Elischer return (ts); 102962fa74d9SJeff Roberson return (runq_steal(&tdq->tdq_idle, cpu)); 103022bf7d9aSJeff Roberson } 103180f86c9fSJeff Roberson 1032ae7a6b38SJeff Roberson /* 1033ae7a6b38SJeff Roberson * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the 10347fcf154aSJeff Roberson * current lock and returns with the assigned queue locked. 1035ae7a6b38SJeff Roberson */ 1036ae7a6b38SJeff Roberson static inline struct tdq * 1037ae7a6b38SJeff Roberson sched_setcpu(struct td_sched *ts, int cpu, int flags) 103880f86c9fSJeff Roberson { 1039ae7a6b38SJeff Roberson struct thread *td; 1040ae7a6b38SJeff Roberson struct tdq *tdq; 104180f86c9fSJeff Roberson 1042ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED); 1043ae7a6b38SJeff Roberson 1044ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpu); 1045ae7a6b38SJeff Roberson td = ts->ts_thread; 1046ae7a6b38SJeff Roberson ts->ts_cpu = cpu; 1047c47f202bSJeff Roberson 1048c47f202bSJeff Roberson /* If the lock matches just return the queue. */ 1049ae7a6b38SJeff Roberson if (td->td_lock == TDQ_LOCKPTR(tdq)) 1050ae7a6b38SJeff Roberson return (tdq); 1051ae7a6b38SJeff Roberson #ifdef notyet 105280f86c9fSJeff Roberson /* 1053a5423ea3SJeff Roberson * If the thread isn't running its lockptr is a 1054ae7a6b38SJeff Roberson * turnstile or a sleepqueue. We can just lock_set without 1055ae7a6b38SJeff Roberson * blocking. 1056670c524fSJeff Roberson */ 1057ae7a6b38SJeff Roberson if (TD_CAN_RUN(td)) { 1058ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1059ae7a6b38SJeff Roberson thread_lock_set(td, TDQ_LOCKPTR(tdq)); 1060ae7a6b38SJeff Roberson return (tdq); 1061ae7a6b38SJeff Roberson } 1062ae7a6b38SJeff Roberson #endif 106380f86c9fSJeff Roberson /* 1064ae7a6b38SJeff Roberson * The hard case, migration, we need to block the thread first to 1065ae7a6b38SJeff Roberson * prevent order reversals with other cpus locks. 10667b8bfa0dSJeff Roberson */ 1067ae7a6b38SJeff Roberson thread_lock_block(td); 1068ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1069ae7a6b38SJeff Roberson thread_lock_unblock(td, TDQ_LOCKPTR(tdq)); 1070ae7a6b38SJeff Roberson return (tdq); 107180f86c9fSJeff Roberson } 10722454aaf5SJeff Roberson 1073ae7a6b38SJeff Roberson static int 1074ae7a6b38SJeff Roberson sched_pickcpu(struct td_sched *ts, int flags) 1075ae7a6b38SJeff Roberson { 107662fa74d9SJeff Roberson struct cpu_group *cg; 107762fa74d9SJeff Roberson struct thread *td; 1078ae7a6b38SJeff Roberson struct tdq *tdq; 107962fa74d9SJeff Roberson cpumask_t mask; 10807b8bfa0dSJeff Roberson int self; 10817b8bfa0dSJeff Roberson int pri; 10827b8bfa0dSJeff Roberson int cpu; 10837b8bfa0dSJeff Roberson 108462fa74d9SJeff Roberson self = PCPU_GET(cpuid); 108562fa74d9SJeff Roberson td = ts->ts_thread; 10867b8bfa0dSJeff Roberson if (smp_started == 0) 10877b8bfa0dSJeff Roberson return (self); 108828994a58SJeff Roberson /* 108928994a58SJeff Roberson * Don't migrate a running thread from sched_switch(). 109028994a58SJeff Roberson */ 109162fa74d9SJeff Roberson if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td)) 109262fa74d9SJeff Roberson return (ts->ts_cpu); 10937b8bfa0dSJeff Roberson /* 109462fa74d9SJeff Roberson * Prefer to run interrupt threads on the processors that generate 109562fa74d9SJeff Roberson * the interrupt. 10967b8bfa0dSJeff Roberson */ 109762fa74d9SJeff Roberson if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) && 109862fa74d9SJeff Roberson curthread->td_intr_nesting_level) 109962fa74d9SJeff Roberson ts->ts_cpu = self; 110062fa74d9SJeff Roberson /* 110162fa74d9SJeff Roberson * If the thread can run on the last cpu and the affinity has not 110262fa74d9SJeff Roberson * expired or it is idle run it there. 110362fa74d9SJeff Roberson */ 110462fa74d9SJeff Roberson pri = td->td_priority; 110562fa74d9SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 110662fa74d9SJeff Roberson if (THREAD_CAN_SCHED(td, ts->ts_cpu)) { 110762fa74d9SJeff Roberson if (tdq->tdq_lowpri > PRI_MIN_IDLE) 110862fa74d9SJeff Roberson return (ts->ts_cpu); 110962fa74d9SJeff Roberson if (SCHED_AFFINITY(ts, CG_SHARE_L2) && tdq->tdq_lowpri > pri) 11107b8bfa0dSJeff Roberson return (ts->ts_cpu); 11117b8bfa0dSJeff Roberson } 11127b8bfa0dSJeff Roberson /* 111362fa74d9SJeff Roberson * Search for the highest level in the tree that still has affinity. 11147b8bfa0dSJeff Roberson */ 111562fa74d9SJeff Roberson cg = NULL; 111662fa74d9SJeff Roberson for (cg = tdq->tdq_cg; cg != NULL; cg = cg->cg_parent) 111762fa74d9SJeff Roberson if (SCHED_AFFINITY(ts, cg->cg_level)) 111862fa74d9SJeff Roberson break; 111962fa74d9SJeff Roberson cpu = -1; 112062fa74d9SJeff Roberson mask = td->td_cpuset->cs_mask.__bits[0]; 112162fa74d9SJeff Roberson if (cg) 112262fa74d9SJeff Roberson cpu = sched_lowest(cg, mask, pri); 112362fa74d9SJeff Roberson if (cpu == -1) 112462fa74d9SJeff Roberson cpu = sched_lowest(cpu_top, mask, -1); 112562fa74d9SJeff Roberson /* 112662fa74d9SJeff Roberson * Compare the lowest loaded cpu to current cpu. 112762fa74d9SJeff Roberson */ 112862fa74d9SJeff Roberson if (THREAD_CAN_SCHED(td, self) && 112962fa74d9SJeff Roberson TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE) { 113062fa74d9SJeff Roberson if (tryself && TDQ_CPU(self)->tdq_lowpri > pri) 113162fa74d9SJeff Roberson cpu = self; 113262fa74d9SJeff Roberson else if (oldtryself && curthread->td_priority > pri) 113362fa74d9SJeff Roberson cpu = self; 11347b8bfa0dSJeff Roberson } 113562fa74d9SJeff Roberson if (cpu == -1) { 113662fa74d9SJeff Roberson panic("cpu == -1, mask 0x%X cpu top %p", mask, cpu_top); 11377b8bfa0dSJeff Roberson } 1138ae7a6b38SJeff Roberson return (cpu); 113980f86c9fSJeff Roberson } 114062fa74d9SJeff Roberson #endif 114122bf7d9aSJeff Roberson 114222bf7d9aSJeff Roberson /* 114322bf7d9aSJeff Roberson * Pick the highest priority task we have and return it. 11440c0a98b2SJeff Roberson */ 1145ad1e7d28SJulian Elischer static struct td_sched * 1146ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq) 11475d7ef00cSJeff Roberson { 1148ad1e7d28SJulian Elischer struct td_sched *ts; 11495d7ef00cSJeff Roberson 1150ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 1151e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_realtime); 1152dda713dfSJeff Roberson if (ts != NULL) 1153e7d50326SJeff Roberson return (ts); 11543f872f85SJeff Roberson ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 1155e7d50326SJeff Roberson if (ts != NULL) { 1156dda713dfSJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE, 1157e7d50326SJeff Roberson ("tdq_choose: Invalid priority on timeshare queue %d", 1158e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1159ad1e7d28SJulian Elischer return (ts); 116015dc847eSJeff Roberson } 116115dc847eSJeff Roberson 1162e7d50326SJeff Roberson ts = runq_choose(&tdq->tdq_idle); 1163e7d50326SJeff Roberson if (ts != NULL) { 1164e7d50326SJeff Roberson KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE, 1165e7d50326SJeff Roberson ("tdq_choose: Invalid priority on idle queue %d", 1166e7d50326SJeff Roberson ts->ts_thread->td_priority)); 1167e7d50326SJeff Roberson return (ts); 1168e7d50326SJeff Roberson } 1169e7d50326SJeff Roberson 1170e7d50326SJeff Roberson return (NULL); 1171245f3abfSJeff Roberson } 11720a016a05SJeff Roberson 1173ae7a6b38SJeff Roberson /* 1174ae7a6b38SJeff Roberson * Initialize a thread queue. 1175ae7a6b38SJeff Roberson */ 11760a016a05SJeff Roberson static void 1177ad1e7d28SJulian Elischer tdq_setup(struct tdq *tdq) 11780a016a05SJeff Roberson { 1179ae7a6b38SJeff Roberson 1180c47f202bSJeff Roberson if (bootverbose) 1181c47f202bSJeff Roberson printf("ULE: setup cpu %d\n", TDQ_ID(tdq)); 1182e7d50326SJeff Roberson runq_init(&tdq->tdq_realtime); 1183e7d50326SJeff Roberson runq_init(&tdq->tdq_timeshare); 1184d2ad694cSJeff Roberson runq_init(&tdq->tdq_idle); 118562fa74d9SJeff Roberson snprintf(tdq->tdq_name, sizeof(tdq->tdq_name), 118662fa74d9SJeff Roberson "sched lock %d", (int)TDQ_ID(tdq)); 118762fa74d9SJeff Roberson mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock", 118862fa74d9SJeff Roberson MTX_SPIN | MTX_RECURSE); 11890a016a05SJeff Roberson } 11900a016a05SJeff Roberson 1191c47f202bSJeff Roberson #ifdef SMP 1192c47f202bSJeff Roberson static void 1193c47f202bSJeff Roberson sched_setup_smp(void) 1194c47f202bSJeff Roberson { 1195c47f202bSJeff Roberson struct tdq *tdq; 1196c47f202bSJeff Roberson int i; 1197c47f202bSJeff Roberson 119862fa74d9SJeff Roberson cpu_top = smp_topo(); 119962fa74d9SJeff Roberson for (i = 0; i < MAXCPU; i++) { 1200c47f202bSJeff Roberson if (CPU_ABSENT(i)) 1201c47f202bSJeff Roberson continue; 120262fa74d9SJeff Roberson tdq = TDQ_CPU(i); 1203c47f202bSJeff Roberson tdq_setup(tdq); 120462fa74d9SJeff Roberson tdq->tdq_cg = smp_topo_find(cpu_top, i); 120562fa74d9SJeff Roberson if (tdq->tdq_cg == NULL) 120662fa74d9SJeff Roberson panic("Can't find cpu group for %d\n", i); 1207c47f202bSJeff Roberson } 120862fa74d9SJeff Roberson balance_tdq = TDQ_SELF(); 120962fa74d9SJeff Roberson sched_balance(); 1210c47f202bSJeff Roberson } 1211c47f202bSJeff Roberson #endif 1212c47f202bSJeff Roberson 1213ae7a6b38SJeff Roberson /* 1214ae7a6b38SJeff Roberson * Setup the thread queues and initialize the topology based on MD 1215ae7a6b38SJeff Roberson * information. 1216ae7a6b38SJeff Roberson */ 121735e6168fSJeff Roberson static void 121835e6168fSJeff Roberson sched_setup(void *dummy) 121935e6168fSJeff Roberson { 1220ae7a6b38SJeff Roberson struct tdq *tdq; 1221c47f202bSJeff Roberson 1222c47f202bSJeff Roberson tdq = TDQ_SELF(); 12230ec896fdSJeff Roberson #ifdef SMP 1224c47f202bSJeff Roberson sched_setup_smp(); 1225749d01b0SJeff Roberson #else 1226c47f202bSJeff Roberson tdq_setup(tdq); 1227356500a3SJeff Roberson #endif 1228ae7a6b38SJeff Roberson /* 1229ae7a6b38SJeff Roberson * To avoid divide-by-zero, we set realstathz a dummy value 1230ae7a6b38SJeff Roberson * in case which sched_clock() called before sched_initticks(). 1231ae7a6b38SJeff Roberson */ 1232ae7a6b38SJeff Roberson realstathz = hz; 1233ae7a6b38SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */ 1234ae7a6b38SJeff Roberson tickincr = 1 << SCHED_TICK_SHIFT; 1235ae7a6b38SJeff Roberson 1236ae7a6b38SJeff Roberson /* Add thread0's load since it's running. */ 1237ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1238c47f202bSJeff Roberson thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF()); 1239ae7a6b38SJeff Roberson tdq_load_add(tdq, &td_sched0); 124062fa74d9SJeff Roberson tdq->tdq_lowpri = thread0.td_priority; 1241ae7a6b38SJeff Roberson TDQ_UNLOCK(tdq); 124235e6168fSJeff Roberson } 124335e6168fSJeff Roberson 1244ae7a6b38SJeff Roberson /* 1245ae7a6b38SJeff Roberson * This routine determines the tickincr after stathz and hz are setup. 1246ae7a6b38SJeff Roberson */ 1247a1d4fe69SDavid Xu /* ARGSUSED */ 1248a1d4fe69SDavid Xu static void 1249a1d4fe69SDavid Xu sched_initticks(void *dummy) 1250a1d4fe69SDavid Xu { 1251ae7a6b38SJeff Roberson int incr; 1252ae7a6b38SJeff Roberson 1253a1d4fe69SDavid Xu realstathz = stathz ? stathz : hz; 125414618990SJeff Roberson sched_slice = (realstathz/10); /* ~100ms */ 1255a1d4fe69SDavid Xu 1256a1d4fe69SDavid Xu /* 1257e7d50326SJeff Roberson * tickincr is shifted out by 10 to avoid rounding errors due to 12583f872f85SJeff Roberson * hz not being evenly divisible by stathz on all platforms. 1259e7d50326SJeff Roberson */ 1260ae7a6b38SJeff Roberson incr = (hz << SCHED_TICK_SHIFT) / realstathz; 1261e7d50326SJeff Roberson /* 1262e7d50326SJeff Roberson * This does not work for values of stathz that are more than 1263e7d50326SJeff Roberson * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen. 1264a1d4fe69SDavid Xu */ 1265ae7a6b38SJeff Roberson if (incr == 0) 1266ae7a6b38SJeff Roberson incr = 1; 1267ae7a6b38SJeff Roberson tickincr = incr; 12687b8bfa0dSJeff Roberson #ifdef SMP 12699862717aSJeff Roberson /* 12707fcf154aSJeff Roberson * Set the default balance interval now that we know 12717fcf154aSJeff Roberson * what realstathz is. 12727fcf154aSJeff Roberson */ 12737fcf154aSJeff Roberson balance_interval = realstathz; 12747fcf154aSJeff Roberson /* 12759862717aSJeff Roberson * Set steal thresh to log2(mp_ncpu) but no greater than 4. This 12769862717aSJeff Roberson * prevents excess thrashing on large machines and excess idle on 12779862717aSJeff Roberson * smaller machines. 12789862717aSJeff Roberson */ 127962fa74d9SJeff Roberson steal_thresh = min(ffs(mp_ncpus) - 1, 3); 12807b8bfa0dSJeff Roberson affinity = SCHED_AFFINITY_DEFAULT; 12817b8bfa0dSJeff Roberson #endif 1282a1d4fe69SDavid Xu } 1283a1d4fe69SDavid Xu 1284a1d4fe69SDavid Xu 128535e6168fSJeff Roberson /* 1286ae7a6b38SJeff Roberson * This is the core of the interactivity algorithm. Determines a score based 1287ae7a6b38SJeff Roberson * on past behavior. It is the ratio of sleep time to run time scaled to 1288ae7a6b38SJeff Roberson * a [0, 100] integer. This is the voluntary sleep time of a process, which 1289ae7a6b38SJeff Roberson * differs from the cpu usage because it does not account for time spent 1290ae7a6b38SJeff Roberson * waiting on a run-queue. Would be prettier if we had floating point. 1291ae7a6b38SJeff Roberson */ 1292ae7a6b38SJeff Roberson static int 1293ae7a6b38SJeff Roberson sched_interact_score(struct thread *td) 1294ae7a6b38SJeff Roberson { 1295ae7a6b38SJeff Roberson struct td_sched *ts; 1296ae7a6b38SJeff Roberson int div; 1297ae7a6b38SJeff Roberson 1298ae7a6b38SJeff Roberson ts = td->td_sched; 1299ae7a6b38SJeff Roberson /* 1300ae7a6b38SJeff Roberson * The score is only needed if this is likely to be an interactive 1301ae7a6b38SJeff Roberson * task. Don't go through the expense of computing it if there's 1302ae7a6b38SJeff Roberson * no chance. 1303ae7a6b38SJeff Roberson */ 1304ae7a6b38SJeff Roberson if (sched_interact <= SCHED_INTERACT_HALF && 1305ae7a6b38SJeff Roberson ts->ts_runtime >= ts->ts_slptime) 1306ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1307ae7a6b38SJeff Roberson 1308ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1309ae7a6b38SJeff Roberson div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF); 1310ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF + 1311ae7a6b38SJeff Roberson (SCHED_INTERACT_HALF - (ts->ts_slptime / div))); 1312ae7a6b38SJeff Roberson } 1313ae7a6b38SJeff Roberson if (ts->ts_slptime > ts->ts_runtime) { 1314ae7a6b38SJeff Roberson div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF); 1315ae7a6b38SJeff Roberson return (ts->ts_runtime / div); 1316ae7a6b38SJeff Roberson } 1317ae7a6b38SJeff Roberson /* runtime == slptime */ 1318ae7a6b38SJeff Roberson if (ts->ts_runtime) 1319ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1320ae7a6b38SJeff Roberson 1321ae7a6b38SJeff Roberson /* 1322ae7a6b38SJeff Roberson * This can happen if slptime and runtime are 0. 1323ae7a6b38SJeff Roberson */ 1324ae7a6b38SJeff Roberson return (0); 1325ae7a6b38SJeff Roberson 1326ae7a6b38SJeff Roberson } 1327ae7a6b38SJeff Roberson 1328ae7a6b38SJeff Roberson /* 132935e6168fSJeff Roberson * Scale the scheduling priority according to the "interactivity" of this 133035e6168fSJeff Roberson * process. 133135e6168fSJeff Roberson */ 133215dc847eSJeff Roberson static void 13338460a577SJohn Birrell sched_priority(struct thread *td) 133435e6168fSJeff Roberson { 1335e7d50326SJeff Roberson int score; 133635e6168fSJeff Roberson int pri; 133735e6168fSJeff Roberson 13388460a577SJohn Birrell if (td->td_pri_class != PRI_TIMESHARE) 133915dc847eSJeff Roberson return; 1340e7d50326SJeff Roberson /* 1341e7d50326SJeff Roberson * If the score is interactive we place the thread in the realtime 1342e7d50326SJeff Roberson * queue with a priority that is less than kernel and interrupt 1343e7d50326SJeff Roberson * priorities. These threads are not subject to nice restrictions. 1344e7d50326SJeff Roberson * 1345ae7a6b38SJeff Roberson * Scores greater than this are placed on the normal timeshare queue 1346e7d50326SJeff Roberson * where the priority is partially decided by the most recent cpu 1347e7d50326SJeff Roberson * utilization and the rest is decided by nice value. 1348a5423ea3SJeff Roberson * 1349a5423ea3SJeff Roberson * The nice value of the process has a linear effect on the calculated 1350a5423ea3SJeff Roberson * score. Negative nice values make it easier for a thread to be 1351a5423ea3SJeff Roberson * considered interactive. 1352e7d50326SJeff Roberson */ 1353e270652bSJeff Roberson score = imax(0, sched_interact_score(td) - td->td_proc->p_nice); 1354e7d50326SJeff Roberson if (score < sched_interact) { 1355e7d50326SJeff Roberson pri = PRI_MIN_REALTIME; 1356e7d50326SJeff Roberson pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact) 1357e7d50326SJeff Roberson * score; 1358e7d50326SJeff Roberson KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME, 13599a93305aSJeff Roberson ("sched_priority: invalid interactive priority %d score %d", 13609a93305aSJeff Roberson pri, score)); 1361e7d50326SJeff Roberson } else { 1362e7d50326SJeff Roberson pri = SCHED_PRI_MIN; 1363e7d50326SJeff Roberson if (td->td_sched->ts_ticks) 1364e7d50326SJeff Roberson pri += SCHED_PRI_TICKS(td->td_sched); 1365e7d50326SJeff Roberson pri += SCHED_PRI_NICE(td->td_proc->p_nice); 1366ae7a6b38SJeff Roberson KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE, 1367ae7a6b38SJeff Roberson ("sched_priority: invalid priority %d: nice %d, " 1368ae7a6b38SJeff Roberson "ticks %d ftick %d ltick %d tick pri %d", 1369ae7a6b38SJeff Roberson pri, td->td_proc->p_nice, td->td_sched->ts_ticks, 1370ae7a6b38SJeff Roberson td->td_sched->ts_ftick, td->td_sched->ts_ltick, 1371ae7a6b38SJeff Roberson SCHED_PRI_TICKS(td->td_sched))); 1372e7d50326SJeff Roberson } 13738460a577SJohn Birrell sched_user_prio(td, pri); 137435e6168fSJeff Roberson 137515dc847eSJeff Roberson return; 137635e6168fSJeff Roberson } 137735e6168fSJeff Roberson 137835e6168fSJeff Roberson /* 1379d322132cSJeff Roberson * This routine enforces a maximum limit on the amount of scheduling history 1380ae7a6b38SJeff Roberson * kept. It is called after either the slptime or runtime is adjusted. This 1381ae7a6b38SJeff Roberson * function is ugly due to integer math. 1382d322132cSJeff Roberson */ 13834b60e324SJeff Roberson static void 13848460a577SJohn Birrell sched_interact_update(struct thread *td) 13854b60e324SJeff Roberson { 1386155b6ca1SJeff Roberson struct td_sched *ts; 13879a93305aSJeff Roberson u_int sum; 13883f741ca1SJeff Roberson 1389155b6ca1SJeff Roberson ts = td->td_sched; 1390ae7a6b38SJeff Roberson sum = ts->ts_runtime + ts->ts_slptime; 1391d322132cSJeff Roberson if (sum < SCHED_SLP_RUN_MAX) 1392d322132cSJeff Roberson return; 1393d322132cSJeff Roberson /* 1394155b6ca1SJeff Roberson * This only happens from two places: 1395155b6ca1SJeff Roberson * 1) We have added an unusual amount of run time from fork_exit. 1396155b6ca1SJeff Roberson * 2) We have added an unusual amount of sleep time from sched_sleep(). 1397155b6ca1SJeff Roberson */ 1398155b6ca1SJeff Roberson if (sum > SCHED_SLP_RUN_MAX * 2) { 1399ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1400ae7a6b38SJeff Roberson ts->ts_runtime = SCHED_SLP_RUN_MAX; 1401ae7a6b38SJeff Roberson ts->ts_slptime = 1; 1402155b6ca1SJeff Roberson } else { 1403ae7a6b38SJeff Roberson ts->ts_slptime = SCHED_SLP_RUN_MAX; 1404ae7a6b38SJeff Roberson ts->ts_runtime = 1; 1405155b6ca1SJeff Roberson } 1406155b6ca1SJeff Roberson return; 1407155b6ca1SJeff Roberson } 1408155b6ca1SJeff Roberson /* 1409d322132cSJeff Roberson * If we have exceeded by more than 1/5th then the algorithm below 1410d322132cSJeff Roberson * will not bring us back into range. Dividing by two here forces 14112454aaf5SJeff Roberson * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1412d322132cSJeff Roberson */ 141337a35e4aSJeff Roberson if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { 1414ae7a6b38SJeff Roberson ts->ts_runtime /= 2; 1415ae7a6b38SJeff Roberson ts->ts_slptime /= 2; 1416d322132cSJeff Roberson return; 1417d322132cSJeff Roberson } 1418ae7a6b38SJeff Roberson ts->ts_runtime = (ts->ts_runtime / 5) * 4; 1419ae7a6b38SJeff Roberson ts->ts_slptime = (ts->ts_slptime / 5) * 4; 1420d322132cSJeff Roberson } 1421d322132cSJeff Roberson 1422ae7a6b38SJeff Roberson /* 1423ae7a6b38SJeff Roberson * Scale back the interactivity history when a child thread is created. The 1424ae7a6b38SJeff Roberson * history is inherited from the parent but the thread may behave totally 1425ae7a6b38SJeff Roberson * differently. For example, a shell spawning a compiler process. We want 1426ae7a6b38SJeff Roberson * to learn that the compiler is behaving badly very quickly. 1427ae7a6b38SJeff Roberson */ 1428d322132cSJeff Roberson static void 14298460a577SJohn Birrell sched_interact_fork(struct thread *td) 1430d322132cSJeff Roberson { 1431d322132cSJeff Roberson int ratio; 1432d322132cSJeff Roberson int sum; 1433d322132cSJeff Roberson 1434ae7a6b38SJeff Roberson sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime; 1435d322132cSJeff Roberson if (sum > SCHED_SLP_RUN_FORK) { 1436d322132cSJeff Roberson ratio = sum / SCHED_SLP_RUN_FORK; 1437ae7a6b38SJeff Roberson td->td_sched->ts_runtime /= ratio; 1438ae7a6b38SJeff Roberson td->td_sched->ts_slptime /= ratio; 14394b60e324SJeff Roberson } 14404b60e324SJeff Roberson } 14414b60e324SJeff Roberson 144215dc847eSJeff Roberson /* 1443ae7a6b38SJeff Roberson * Called from proc0_init() to setup the scheduler fields. 1444ed062c8dSJulian Elischer */ 1445ed062c8dSJulian Elischer void 1446ed062c8dSJulian Elischer schedinit(void) 1447ed062c8dSJulian Elischer { 1448e7d50326SJeff Roberson 1449ed062c8dSJulian Elischer /* 1450ed062c8dSJulian Elischer * Set up the scheduler specific parts of proc0. 1451ed062c8dSJulian Elischer */ 1452ed062c8dSJulian Elischer proc0.p_sched = NULL; /* XXX */ 1453ad1e7d28SJulian Elischer thread0.td_sched = &td_sched0; 1454e7d50326SJeff Roberson td_sched0.ts_ltick = ticks; 14558ab80cf0SJeff Roberson td_sched0.ts_ftick = ticks; 1456ad1e7d28SJulian Elischer td_sched0.ts_thread = &thread0; 1457ed062c8dSJulian Elischer } 1458ed062c8dSJulian Elischer 1459ed062c8dSJulian Elischer /* 146015dc847eSJeff Roberson * This is only somewhat accurate since given many processes of the same 146115dc847eSJeff Roberson * priority they will switch when their slices run out, which will be 1462e7d50326SJeff Roberson * at most sched_slice stathz ticks. 146315dc847eSJeff Roberson */ 146435e6168fSJeff Roberson int 146535e6168fSJeff Roberson sched_rr_interval(void) 146635e6168fSJeff Roberson { 1467e7d50326SJeff Roberson 1468e7d50326SJeff Roberson /* Convert sched_slice to hz */ 1469e7d50326SJeff Roberson return (hz/(realstathz/sched_slice)); 147035e6168fSJeff Roberson } 147135e6168fSJeff Roberson 1472ae7a6b38SJeff Roberson /* 1473ae7a6b38SJeff Roberson * Update the percent cpu tracking information when it is requested or 1474ae7a6b38SJeff Roberson * the total history exceeds the maximum. We keep a sliding history of 1475ae7a6b38SJeff Roberson * tick counts that slowly decays. This is less precise than the 4BSD 1476ae7a6b38SJeff Roberson * mechanism since it happens with less regular and frequent events. 1477ae7a6b38SJeff Roberson */ 147822bf7d9aSJeff Roberson static void 1479ad1e7d28SJulian Elischer sched_pctcpu_update(struct td_sched *ts) 148035e6168fSJeff Roberson { 1481e7d50326SJeff Roberson 1482e7d50326SJeff Roberson if (ts->ts_ticks == 0) 1483e7d50326SJeff Roberson return; 14848ab80cf0SJeff Roberson if (ticks - (hz / 10) < ts->ts_ltick && 14858ab80cf0SJeff Roberson SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX) 14868ab80cf0SJeff Roberson return; 148735e6168fSJeff Roberson /* 148835e6168fSJeff Roberson * Adjust counters and watermark for pctcpu calc. 1489210491d3SJeff Roberson */ 1490e7d50326SJeff Roberson if (ts->ts_ltick > ticks - SCHED_TICK_TARG) 1491ad1e7d28SJulian Elischer ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) * 1492e7d50326SJeff Roberson SCHED_TICK_TARG; 1493e7d50326SJeff Roberson else 1494ad1e7d28SJulian Elischer ts->ts_ticks = 0; 1495ad1e7d28SJulian Elischer ts->ts_ltick = ticks; 1496e7d50326SJeff Roberson ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG; 149735e6168fSJeff Roberson } 149835e6168fSJeff Roberson 1499ae7a6b38SJeff Roberson /* 1500ae7a6b38SJeff Roberson * Adjust the priority of a thread. Move it to the appropriate run-queue 1501ae7a6b38SJeff Roberson * if necessary. This is the back-end for several priority related 1502ae7a6b38SJeff Roberson * functions. 1503ae7a6b38SJeff Roberson */ 1504e7d50326SJeff Roberson static void 1505f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio) 150635e6168fSJeff Roberson { 1507ad1e7d28SJulian Elischer struct td_sched *ts; 150835e6168fSJeff Roberson 150981d47d3fSJeff Roberson CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1510431f8906SJulian Elischer td, td->td_name, td->td_priority, prio, curthread, 1511431f8906SJulian Elischer curthread->td_name); 1512ad1e7d28SJulian Elischer ts = td->td_sched; 15137b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1514f5c157d9SJohn Baldwin if (td->td_priority == prio) 1515f5c157d9SJohn Baldwin return; 1516e7d50326SJeff Roberson 15173f872f85SJeff Roberson if (TD_ON_RUNQ(td) && prio < td->td_priority) { 15183f741ca1SJeff Roberson /* 15193f741ca1SJeff Roberson * If the priority has been elevated due to priority 15203f741ca1SJeff Roberson * propagation, we may have to move ourselves to a new 1521e7d50326SJeff Roberson * queue. This could be optimized to not re-add in some 1522e7d50326SJeff Roberson * cases. 1523f2b74cbfSJeff Roberson */ 1524e7d50326SJeff Roberson sched_rem(td); 1525e7d50326SJeff Roberson td->td_priority = prio; 1526ae7a6b38SJeff Roberson sched_add(td, SRQ_BORROWING); 1527317da705SJeff Roberson } else if (TD_IS_RUNNING(td)) { 1528ae7a6b38SJeff Roberson struct tdq *tdq; 152962fa74d9SJeff Roberson int oldpri; 1530ae7a6b38SJeff Roberson 1531ae7a6b38SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 153262fa74d9SJeff Roberson oldpri = td->td_priority; 15333f741ca1SJeff Roberson td->td_priority = prio; 153462fa74d9SJeff Roberson if (prio < tdq->tdq_lowpri) 153562fa74d9SJeff Roberson tdq->tdq_lowpri = prio; 153662fa74d9SJeff Roberson else if (tdq->tdq_lowpri == oldpri) 153762fa74d9SJeff Roberson tdq_setlowpri(tdq, td); 1538317da705SJeff Roberson } else 1539317da705SJeff Roberson td->td_priority = prio; 1540ae7a6b38SJeff Roberson } 154135e6168fSJeff Roberson 1542f5c157d9SJohn Baldwin /* 1543f5c157d9SJohn Baldwin * Update a thread's priority when it is lent another thread's 1544f5c157d9SJohn Baldwin * priority. 1545f5c157d9SJohn Baldwin */ 1546f5c157d9SJohn Baldwin void 1547f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio) 1548f5c157d9SJohn Baldwin { 1549f5c157d9SJohn Baldwin 1550f5c157d9SJohn Baldwin td->td_flags |= TDF_BORROWING; 1551f5c157d9SJohn Baldwin sched_thread_priority(td, prio); 1552f5c157d9SJohn Baldwin } 1553f5c157d9SJohn Baldwin 1554f5c157d9SJohn Baldwin /* 1555f5c157d9SJohn Baldwin * Restore a thread's priority when priority propagation is 1556f5c157d9SJohn Baldwin * over. The prio argument is the minimum priority the thread 1557f5c157d9SJohn Baldwin * needs to have to satisfy other possible priority lending 1558f5c157d9SJohn Baldwin * requests. If the thread's regular priority is less 1559f5c157d9SJohn Baldwin * important than prio, the thread will keep a priority boost 1560f5c157d9SJohn Baldwin * of prio. 1561f5c157d9SJohn Baldwin */ 1562f5c157d9SJohn Baldwin void 1563f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio) 1564f5c157d9SJohn Baldwin { 1565f5c157d9SJohn Baldwin u_char base_pri; 1566f5c157d9SJohn Baldwin 1567f5c157d9SJohn Baldwin if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1568f5c157d9SJohn Baldwin td->td_base_pri <= PRI_MAX_TIMESHARE) 15698460a577SJohn Birrell base_pri = td->td_user_pri; 1570f5c157d9SJohn Baldwin else 1571f5c157d9SJohn Baldwin base_pri = td->td_base_pri; 1572f5c157d9SJohn Baldwin if (prio >= base_pri) { 1573f5c157d9SJohn Baldwin td->td_flags &= ~TDF_BORROWING; 1574f5c157d9SJohn Baldwin sched_thread_priority(td, base_pri); 1575f5c157d9SJohn Baldwin } else 1576f5c157d9SJohn Baldwin sched_lend_prio(td, prio); 1577f5c157d9SJohn Baldwin } 1578f5c157d9SJohn Baldwin 1579ae7a6b38SJeff Roberson /* 1580ae7a6b38SJeff Roberson * Standard entry for setting the priority to an absolute value. 1581ae7a6b38SJeff Roberson */ 1582f5c157d9SJohn Baldwin void 1583f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio) 1584f5c157d9SJohn Baldwin { 1585f5c157d9SJohn Baldwin u_char oldprio; 1586f5c157d9SJohn Baldwin 1587f5c157d9SJohn Baldwin /* First, update the base priority. */ 1588f5c157d9SJohn Baldwin td->td_base_pri = prio; 1589f5c157d9SJohn Baldwin 1590f5c157d9SJohn Baldwin /* 159150aaa791SJohn Baldwin * If the thread is borrowing another thread's priority, don't 1592f5c157d9SJohn Baldwin * ever lower the priority. 1593f5c157d9SJohn Baldwin */ 1594f5c157d9SJohn Baldwin if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1595f5c157d9SJohn Baldwin return; 1596f5c157d9SJohn Baldwin 1597f5c157d9SJohn Baldwin /* Change the real priority. */ 1598f5c157d9SJohn Baldwin oldprio = td->td_priority; 1599f5c157d9SJohn Baldwin sched_thread_priority(td, prio); 1600f5c157d9SJohn Baldwin 1601f5c157d9SJohn Baldwin /* 1602f5c157d9SJohn Baldwin * If the thread is on a turnstile, then let the turnstile update 1603f5c157d9SJohn Baldwin * its state. 1604f5c157d9SJohn Baldwin */ 1605f5c157d9SJohn Baldwin if (TD_ON_LOCK(td) && oldprio != prio) 1606f5c157d9SJohn Baldwin turnstile_adjust(td, oldprio); 1607f5c157d9SJohn Baldwin } 1608f5c157d9SJohn Baldwin 1609ae7a6b38SJeff Roberson /* 1610ae7a6b38SJeff Roberson * Set the base user priority, does not effect current running priority. 1611ae7a6b38SJeff Roberson */ 161235e6168fSJeff Roberson void 16138460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio) 16143db720fdSDavid Xu { 16153db720fdSDavid Xu u_char oldprio; 16163db720fdSDavid Xu 16178460a577SJohn Birrell td->td_base_user_pri = prio; 1618fc6c30f6SJulian Elischer if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio) 1619fc6c30f6SJulian Elischer return; 16208460a577SJohn Birrell oldprio = td->td_user_pri; 16218460a577SJohn Birrell td->td_user_pri = prio; 16223db720fdSDavid Xu } 16233db720fdSDavid Xu 16243db720fdSDavid Xu void 16253db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio) 16263db720fdSDavid Xu { 16273db720fdSDavid Xu u_char oldprio; 16283db720fdSDavid Xu 1629435806d3SDavid Xu THREAD_LOCK_ASSERT(td, MA_OWNED); 16303db720fdSDavid Xu td->td_flags |= TDF_UBORROWING; 1631f645b5daSMaxim Konovalov oldprio = td->td_user_pri; 16328460a577SJohn Birrell td->td_user_pri = prio; 16333db720fdSDavid Xu } 16343db720fdSDavid Xu 16353db720fdSDavid Xu void 16363db720fdSDavid Xu sched_unlend_user_prio(struct thread *td, u_char prio) 16373db720fdSDavid Xu { 16383db720fdSDavid Xu u_char base_pri; 16393db720fdSDavid Xu 1640435806d3SDavid Xu THREAD_LOCK_ASSERT(td, MA_OWNED); 16418460a577SJohn Birrell base_pri = td->td_base_user_pri; 16423db720fdSDavid Xu if (prio >= base_pri) { 16433db720fdSDavid Xu td->td_flags &= ~TDF_UBORROWING; 16448460a577SJohn Birrell sched_user_prio(td, base_pri); 1645435806d3SDavid Xu } else { 16463db720fdSDavid Xu sched_lend_user_prio(td, prio); 16473db720fdSDavid Xu } 1648435806d3SDavid Xu } 16493db720fdSDavid Xu 1650ae7a6b38SJeff Roberson /* 165108c9a16cSJeff Roberson * Add the thread passed as 'newtd' to the run queue before selecting 165208c9a16cSJeff Roberson * the next thread to run. This is only used for KSE. 165308c9a16cSJeff Roberson */ 165408c9a16cSJeff Roberson static void 165508c9a16cSJeff Roberson sched_switchin(struct tdq *tdq, struct thread *td) 165608c9a16cSJeff Roberson { 165708c9a16cSJeff Roberson #ifdef SMP 165808c9a16cSJeff Roberson spinlock_enter(); 165908c9a16cSJeff Roberson TDQ_UNLOCK(tdq); 166008c9a16cSJeff Roberson thread_lock(td); 166108c9a16cSJeff Roberson spinlock_exit(); 166208c9a16cSJeff Roberson sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING); 166308c9a16cSJeff Roberson #else 166408c9a16cSJeff Roberson td->td_lock = TDQ_LOCKPTR(tdq); 166508c9a16cSJeff Roberson #endif 166608c9a16cSJeff Roberson tdq_add(tdq, td, SRQ_YIELDING); 166708c9a16cSJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 166808c9a16cSJeff Roberson } 166908c9a16cSJeff Roberson 167008c9a16cSJeff Roberson /* 1671731016feSWojciech A. Koszek * Block a thread for switching. Similar to thread_block() but does not 1672731016feSWojciech A. Koszek * bump the spin count. 1673731016feSWojciech A. Koszek */ 1674731016feSWojciech A. Koszek static inline struct mtx * 1675731016feSWojciech A. Koszek thread_block_switch(struct thread *td) 1676731016feSWojciech A. Koszek { 1677731016feSWojciech A. Koszek struct mtx *lock; 1678731016feSWojciech A. Koszek 1679731016feSWojciech A. Koszek THREAD_LOCK_ASSERT(td, MA_OWNED); 1680731016feSWojciech A. Koszek lock = td->td_lock; 1681731016feSWojciech A. Koszek td->td_lock = &blocked_lock; 1682731016feSWojciech A. Koszek mtx_unlock_spin(lock); 1683731016feSWojciech A. Koszek 1684731016feSWojciech A. Koszek return (lock); 1685731016feSWojciech A. Koszek } 1686731016feSWojciech A. Koszek 1687731016feSWojciech A. Koszek /* 1688c47f202bSJeff Roberson * Handle migration from sched_switch(). This happens only for 1689c47f202bSJeff Roberson * cpu binding. 1690c47f202bSJeff Roberson */ 1691c47f202bSJeff Roberson static struct mtx * 1692c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 1693c47f202bSJeff Roberson { 1694c47f202bSJeff Roberson struct tdq *tdn; 1695c47f202bSJeff Roberson 1696c47f202bSJeff Roberson tdn = TDQ_CPU(td->td_sched->ts_cpu); 1697c47f202bSJeff Roberson #ifdef SMP 1698c47f202bSJeff Roberson /* 1699c47f202bSJeff Roberson * Do the lock dance required to avoid LOR. We grab an extra 1700c47f202bSJeff Roberson * spinlock nesting to prevent preemption while we're 1701c47f202bSJeff Roberson * not holding either run-queue lock. 1702c47f202bSJeff Roberson */ 1703c47f202bSJeff Roberson spinlock_enter(); 1704c47f202bSJeff Roberson thread_block_switch(td); /* This releases the lock on tdq. */ 1705c47f202bSJeff Roberson TDQ_LOCK(tdn); 1706c47f202bSJeff Roberson tdq_add(tdn, td, flags); 1707c47f202bSJeff Roberson tdq_notify(td->td_sched); 1708c47f202bSJeff Roberson /* 1709c47f202bSJeff Roberson * After we unlock tdn the new cpu still can't switch into this 1710c47f202bSJeff Roberson * thread until we've unblocked it in cpu_switch(). The lock 1711c47f202bSJeff Roberson * pointers may match in the case of HTT cores. Don't unlock here 1712c47f202bSJeff Roberson * or we can deadlock when the other CPU runs the IPI handler. 1713c47f202bSJeff Roberson */ 1714c47f202bSJeff Roberson if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) { 1715c47f202bSJeff Roberson TDQ_UNLOCK(tdn); 1716c47f202bSJeff Roberson TDQ_LOCK(tdq); 1717c47f202bSJeff Roberson } 1718c47f202bSJeff Roberson spinlock_exit(); 1719c47f202bSJeff Roberson #endif 1720c47f202bSJeff Roberson return (TDQ_LOCKPTR(tdn)); 1721c47f202bSJeff Roberson } 1722c47f202bSJeff Roberson 1723c47f202bSJeff Roberson /* 1724ae7a6b38SJeff Roberson * Release a thread that was blocked with thread_block_switch(). 1725ae7a6b38SJeff Roberson */ 1726ae7a6b38SJeff Roberson static inline void 1727ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx) 1728ae7a6b38SJeff Roberson { 1729ae7a6b38SJeff Roberson atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 1730ae7a6b38SJeff Roberson (uintptr_t)mtx); 1731ae7a6b38SJeff Roberson } 1732ae7a6b38SJeff Roberson 1733ae7a6b38SJeff Roberson /* 1734ae7a6b38SJeff Roberson * Switch threads. This function has to handle threads coming in while 1735ae7a6b38SJeff Roberson * blocked for some reason, running, or idle. It also must deal with 1736ae7a6b38SJeff Roberson * migrating a thread from one queue to another as running threads may 1737ae7a6b38SJeff Roberson * be assigned elsewhere via binding. 1738ae7a6b38SJeff Roberson */ 17393db720fdSDavid Xu void 17403389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags) 174135e6168fSJeff Roberson { 1742c02bbb43SJeff Roberson struct tdq *tdq; 1743ad1e7d28SJulian Elischer struct td_sched *ts; 1744ae7a6b38SJeff Roberson struct mtx *mtx; 1745c47f202bSJeff Roberson int srqflag; 1746ae7a6b38SJeff Roberson int cpuid; 174735e6168fSJeff Roberson 17487b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 174935e6168fSJeff Roberson 1750ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1751ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1752e7d50326SJeff Roberson ts = td->td_sched; 1753c47f202bSJeff Roberson mtx = td->td_lock; 1754ae7a6b38SJeff Roberson ts->ts_rltick = ticks; 1755060563ecSJulian Elischer td->td_lastcpu = td->td_oncpu; 1756060563ecSJulian Elischer td->td_oncpu = NOCPU; 175752eb8464SJohn Baldwin td->td_flags &= ~TDF_NEEDRESCHED; 175877918643SStephan Uphoff td->td_owepreempt = 0; 1759b11fdad0SJeff Roberson /* 1760ae7a6b38SJeff Roberson * The lock pointer in an idle thread should never change. Reset it 1761ae7a6b38SJeff Roberson * to CAN_RUN as well. 1762b11fdad0SJeff Roberson */ 1763486a9414SJulian Elischer if (TD_IS_IDLETHREAD(td)) { 1764ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1765bf0acc27SJohn Baldwin TD_SET_CAN_RUN(td); 17667b20fb19SJeff Roberson } else if (TD_IS_RUNNING(td)) { 1767ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 17687b20fb19SJeff Roberson tdq_load_rem(tdq, ts); 1769c47f202bSJeff Roberson srqflag = (flags & SW_PREEMPT) ? 1770598b368dSJeff Roberson SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1771c47f202bSJeff Roberson SRQ_OURSELF|SRQ_YIELDING; 1772c47f202bSJeff Roberson if (ts->ts_cpu == cpuid) 1773c47f202bSJeff Roberson tdq_add(tdq, td, srqflag); 1774c47f202bSJeff Roberson else 1775c47f202bSJeff Roberson mtx = sched_switch_migrate(tdq, td, srqflag); 1776ae7a6b38SJeff Roberson } else { 1777ae7a6b38SJeff Roberson /* This thread must be going to sleep. */ 1778ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1779ae7a6b38SJeff Roberson mtx = thread_block_switch(td); 1780ae7a6b38SJeff Roberson tdq_load_rem(tdq, ts); 1781ae7a6b38SJeff Roberson } 1782ae7a6b38SJeff Roberson /* 1783ae7a6b38SJeff Roberson * We enter here with the thread blocked and assigned to the 1784ae7a6b38SJeff Roberson * appropriate cpu run-queue or sleep-queue and with the current 1785ae7a6b38SJeff Roberson * thread-queue locked. 1786ae7a6b38SJeff Roberson */ 1787ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 1788ae7a6b38SJeff Roberson /* 178908c9a16cSJeff Roberson * If KSE assigned a new thread just add it here and let choosethread 179008c9a16cSJeff Roberson * select the best one. 1791ae7a6b38SJeff Roberson */ 179208c9a16cSJeff Roberson if (newtd != NULL) 179308c9a16cSJeff Roberson sched_switchin(tdq, newtd); 17942454aaf5SJeff Roberson newtd = choosethread(); 1795ae7a6b38SJeff Roberson /* 1796ae7a6b38SJeff Roberson * Call the MD code to switch contexts if necessary. 1797ae7a6b38SJeff Roberson */ 1798ebccf1e3SJoseph Koshy if (td != newtd) { 1799ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1800ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1801ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1802ebccf1e3SJoseph Koshy #endif 1803eea4f254SJeff Roberson lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); 180459c68134SJeff Roberson TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 1805ae7a6b38SJeff Roberson cpu_switch(td, newtd, mtx); 1806ae7a6b38SJeff Roberson /* 1807ae7a6b38SJeff Roberson * We may return from cpu_switch on a different cpu. However, 1808ae7a6b38SJeff Roberson * we always return with td_lock pointing to the current cpu's 1809ae7a6b38SJeff Roberson * run queue lock. 1810ae7a6b38SJeff Roberson */ 1811ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 1812ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 1813eea4f254SJeff Roberson lock_profile_obtain_lock_success( 1814eea4f254SJeff Roberson &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); 1815ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1816ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1817ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1818ebccf1e3SJoseph Koshy #endif 1819ae7a6b38SJeff Roberson } else 1820ae7a6b38SJeff Roberson thread_unblock_switch(td, mtx); 1821ae7a6b38SJeff Roberson /* 182262fa74d9SJeff Roberson * We should always get here with the lowest priority td possible. 182362fa74d9SJeff Roberson */ 182462fa74d9SJeff Roberson tdq->tdq_lowpri = td->td_priority; 182562fa74d9SJeff Roberson /* 1826ae7a6b38SJeff Roberson * Assert that all went well and return. 1827ae7a6b38SJeff Roberson */ 1828ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED); 1829ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 1830ae7a6b38SJeff Roberson td->td_oncpu = cpuid; 183135e6168fSJeff Roberson } 183235e6168fSJeff Roberson 1833ae7a6b38SJeff Roberson /* 1834ae7a6b38SJeff Roberson * Adjust thread priorities as a result of a nice request. 1835ae7a6b38SJeff Roberson */ 183635e6168fSJeff Roberson void 1837fa885116SJulian Elischer sched_nice(struct proc *p, int nice) 183835e6168fSJeff Roberson { 183935e6168fSJeff Roberson struct thread *td; 184035e6168fSJeff Roberson 1841fa885116SJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 18427b20fb19SJeff Roberson PROC_SLOCK_ASSERT(p, MA_OWNED); 1843e7d50326SJeff Roberson 1844fa885116SJulian Elischer p->p_nice = nice; 18458460a577SJohn Birrell FOREACH_THREAD_IN_PROC(p, td) { 18467b20fb19SJeff Roberson thread_lock(td); 18478460a577SJohn Birrell sched_priority(td); 1848e7d50326SJeff Roberson sched_prio(td, td->td_base_user_pri); 18497b20fb19SJeff Roberson thread_unlock(td); 185035e6168fSJeff Roberson } 1851fa885116SJulian Elischer } 185235e6168fSJeff Roberson 1853ae7a6b38SJeff Roberson /* 1854ae7a6b38SJeff Roberson * Record the sleep time for the interactivity scorer. 1855ae7a6b38SJeff Roberson */ 185635e6168fSJeff Roberson void 185744f3b092SJohn Baldwin sched_sleep(struct thread *td) 185835e6168fSJeff Roberson { 1859e7d50326SJeff Roberson 18607b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 186135e6168fSJeff Roberson 186254b0e65fSJeff Roberson td->td_slptick = ticks; 186335e6168fSJeff Roberson } 186435e6168fSJeff Roberson 1865ae7a6b38SJeff Roberson /* 1866ae7a6b38SJeff Roberson * Schedule a thread to resume execution and record how long it voluntarily 1867ae7a6b38SJeff Roberson * slept. We also update the pctcpu, interactivity, and priority. 1868ae7a6b38SJeff Roberson */ 186935e6168fSJeff Roberson void 187035e6168fSJeff Roberson sched_wakeup(struct thread *td) 187135e6168fSJeff Roberson { 187214618990SJeff Roberson struct td_sched *ts; 1873ae7a6b38SJeff Roberson int slptick; 1874e7d50326SJeff Roberson 18757b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 187614618990SJeff Roberson ts = td->td_sched; 187735e6168fSJeff Roberson /* 1878e7d50326SJeff Roberson * If we slept for more than a tick update our interactivity and 1879e7d50326SJeff Roberson * priority. 188035e6168fSJeff Roberson */ 188154b0e65fSJeff Roberson slptick = td->td_slptick; 188254b0e65fSJeff Roberson td->td_slptick = 0; 1883ae7a6b38SJeff Roberson if (slptick && slptick != ticks) { 18849a93305aSJeff Roberson u_int hzticks; 1885f1e8dc4aSJeff Roberson 1886ae7a6b38SJeff Roberson hzticks = (ticks - slptick) << SCHED_TICK_SHIFT; 1887ae7a6b38SJeff Roberson ts->ts_slptime += hzticks; 18888460a577SJohn Birrell sched_interact_update(td); 188914618990SJeff Roberson sched_pctcpu_update(ts); 18908460a577SJohn Birrell sched_priority(td); 1891f1e8dc4aSJeff Roberson } 189214618990SJeff Roberson /* Reset the slice value after we sleep. */ 189314618990SJeff Roberson ts->ts_slice = sched_slice; 18947a5e5e2aSJeff Roberson sched_add(td, SRQ_BORING); 189535e6168fSJeff Roberson } 189635e6168fSJeff Roberson 189735e6168fSJeff Roberson /* 189835e6168fSJeff Roberson * Penalize the parent for creating a new child and initialize the child's 189935e6168fSJeff Roberson * priority. 190035e6168fSJeff Roberson */ 190135e6168fSJeff Roberson void 19028460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child) 190315dc847eSJeff Roberson { 19047b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1905ad1e7d28SJulian Elischer sched_fork_thread(td, child); 1906e7d50326SJeff Roberson /* 1907e7d50326SJeff Roberson * Penalize the parent and child for forking. 1908e7d50326SJeff Roberson */ 1909e7d50326SJeff Roberson sched_interact_fork(child); 1910e7d50326SJeff Roberson sched_priority(child); 1911ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr; 1912e7d50326SJeff Roberson sched_interact_update(td); 1913e7d50326SJeff Roberson sched_priority(td); 1914ad1e7d28SJulian Elischer } 1915ad1e7d28SJulian Elischer 1916ae7a6b38SJeff Roberson /* 1917ae7a6b38SJeff Roberson * Fork a new thread, may be within the same process. 1918ae7a6b38SJeff Roberson */ 1919ad1e7d28SJulian Elischer void 1920ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child) 1921ad1e7d28SJulian Elischer { 1922ad1e7d28SJulian Elischer struct td_sched *ts; 1923ad1e7d28SJulian Elischer struct td_sched *ts2; 19248460a577SJohn Birrell 1925e7d50326SJeff Roberson /* 1926e7d50326SJeff Roberson * Initialize child. 1927e7d50326SJeff Roberson */ 19287b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1929ed062c8dSJulian Elischer sched_newthread(child); 1930ae7a6b38SJeff Roberson child->td_lock = TDQ_LOCKPTR(TDQ_SELF()); 193162fa74d9SJeff Roberson child->td_cpuset = cpuset_ref(td->td_cpuset); 1932ad1e7d28SJulian Elischer ts = td->td_sched; 1933ad1e7d28SJulian Elischer ts2 = child->td_sched; 1934ad1e7d28SJulian Elischer ts2->ts_cpu = ts->ts_cpu; 1935ad1e7d28SJulian Elischer ts2->ts_runq = NULL; 1936e7d50326SJeff Roberson /* 1937e7d50326SJeff Roberson * Grab our parents cpu estimation information and priority. 1938e7d50326SJeff Roberson */ 1939ad1e7d28SJulian Elischer ts2->ts_ticks = ts->ts_ticks; 1940ad1e7d28SJulian Elischer ts2->ts_ltick = ts->ts_ltick; 1941ad1e7d28SJulian Elischer ts2->ts_ftick = ts->ts_ftick; 1942e7d50326SJeff Roberson child->td_user_pri = td->td_user_pri; 1943e7d50326SJeff Roberson child->td_base_user_pri = td->td_base_user_pri; 1944e7d50326SJeff Roberson /* 1945e7d50326SJeff Roberson * And update interactivity score. 1946e7d50326SJeff Roberson */ 1947ae7a6b38SJeff Roberson ts2->ts_slptime = ts->ts_slptime; 1948ae7a6b38SJeff Roberson ts2->ts_runtime = ts->ts_runtime; 1949e7d50326SJeff Roberson ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */ 195015dc847eSJeff Roberson } 195115dc847eSJeff Roberson 1952ae7a6b38SJeff Roberson /* 1953ae7a6b38SJeff Roberson * Adjust the priority class of a thread. 1954ae7a6b38SJeff Roberson */ 195515dc847eSJeff Roberson void 19568460a577SJohn Birrell sched_class(struct thread *td, int class) 195715dc847eSJeff Roberson { 195815dc847eSJeff Roberson 19597b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 19608460a577SJohn Birrell if (td->td_pri_class == class) 196115dc847eSJeff Roberson return; 1962155b9987SJeff Roberson /* 1963155b9987SJeff Roberson * On SMP if we're on the RUNQ we must adjust the transferable 1964155b9987SJeff Roberson * count because could be changing to or from an interrupt 1965155b9987SJeff Roberson * class. 1966155b9987SJeff Roberson */ 19677a5e5e2aSJeff Roberson if (TD_ON_RUNQ(td)) { 19681e516cf5SJeff Roberson struct tdq *tdq; 19691e516cf5SJeff Roberson 19701e516cf5SJeff Roberson tdq = TDQ_CPU(td->td_sched->ts_cpu); 197162fa74d9SJeff Roberson if (THREAD_CAN_MIGRATE(td)) 1972d2ad694cSJeff Roberson tdq->tdq_transferable--; 19731e516cf5SJeff Roberson td->td_pri_class = class; 197462fa74d9SJeff Roberson if (THREAD_CAN_MIGRATE(td)) 1975d2ad694cSJeff Roberson tdq->tdq_transferable++; 197680f86c9fSJeff Roberson } 19778460a577SJohn Birrell td->td_pri_class = class; 197835e6168fSJeff Roberson } 197935e6168fSJeff Roberson 198035e6168fSJeff Roberson /* 198135e6168fSJeff Roberson * Return some of the child's priority and interactivity to the parent. 198235e6168fSJeff Roberson */ 198335e6168fSJeff Roberson void 1984fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child) 198535e6168fSJeff Roberson { 1986e7d50326SJeff Roberson struct thread *td; 1987141ad61cSJeff Roberson 19888460a577SJohn Birrell CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d", 1989431f8906SJulian Elischer child, child->td_name, child->td_priority); 19908460a577SJohn Birrell 19917b20fb19SJeff Roberson PROC_SLOCK_ASSERT(p, MA_OWNED); 1992e7d50326SJeff Roberson td = FIRST_THREAD_IN_PROC(p); 1993e7d50326SJeff Roberson sched_exit_thread(td, child); 1994ad1e7d28SJulian Elischer } 1995ad1e7d28SJulian Elischer 1996ae7a6b38SJeff Roberson /* 1997ae7a6b38SJeff Roberson * Penalize another thread for the time spent on this one. This helps to 1998ae7a6b38SJeff Roberson * worsen the priority and interactivity of processes which schedule batch 1999ae7a6b38SJeff Roberson * jobs such as make. This has little effect on the make process itself but 2000ae7a6b38SJeff Roberson * causes new processes spawned by it to receive worse scores immediately. 2001ae7a6b38SJeff Roberson */ 2002ad1e7d28SJulian Elischer void 2003fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child) 2004ad1e7d28SJulian Elischer { 2005fc6c30f6SJulian Elischer 2006e7d50326SJeff Roberson CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 2007431f8906SJulian Elischer child, child->td_name, child->td_priority); 2008e7d50326SJeff Roberson 2009e7d50326SJeff Roberson #ifdef KSE 2010e7d50326SJeff Roberson /* 2011e7d50326SJeff Roberson * KSE forks and exits so often that this penalty causes short-lived 2012e7d50326SJeff Roberson * threads to always be non-interactive. This causes mozilla to 2013e7d50326SJeff Roberson * crawl under load. 2014e7d50326SJeff Roberson */ 2015e7d50326SJeff Roberson if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc) 2016e7d50326SJeff Roberson return; 2017e7d50326SJeff Roberson #endif 2018e7d50326SJeff Roberson /* 2019e7d50326SJeff Roberson * Give the child's runtime to the parent without returning the 2020e7d50326SJeff Roberson * sleep time as a penalty to the parent. This causes shells that 2021e7d50326SJeff Roberson * launch expensive things to mark their children as expensive. 2022e7d50326SJeff Roberson */ 20237b20fb19SJeff Roberson thread_lock(td); 2024ae7a6b38SJeff Roberson td->td_sched->ts_runtime += child->td_sched->ts_runtime; 2025fc6c30f6SJulian Elischer sched_interact_update(td); 2026e7d50326SJeff Roberson sched_priority(td); 20277b20fb19SJeff Roberson thread_unlock(td); 2028ad1e7d28SJulian Elischer } 2029ad1e7d28SJulian Elischer 2030ae7a6b38SJeff Roberson /* 2031ae7a6b38SJeff Roberson * Fix priorities on return to user-space. Priorities may be elevated due 2032ae7a6b38SJeff Roberson * to static priorities in msleep() or similar. 2033ae7a6b38SJeff Roberson */ 2034ad1e7d28SJulian Elischer void 2035ad1e7d28SJulian Elischer sched_userret(struct thread *td) 2036ad1e7d28SJulian Elischer { 2037ad1e7d28SJulian Elischer /* 2038ad1e7d28SJulian Elischer * XXX we cheat slightly on the locking here to avoid locking in 2039ad1e7d28SJulian Elischer * the usual case. Setting td_priority here is essentially an 2040ad1e7d28SJulian Elischer * incomplete workaround for not setting it properly elsewhere. 2041ad1e7d28SJulian Elischer * Now that some interrupt handlers are threads, not setting it 2042ad1e7d28SJulian Elischer * properly elsewhere can clobber it in the window between setting 2043ad1e7d28SJulian Elischer * it here and returning to user mode, so don't waste time setting 2044ad1e7d28SJulian Elischer * it perfectly here. 2045ad1e7d28SJulian Elischer */ 2046ad1e7d28SJulian Elischer KASSERT((td->td_flags & TDF_BORROWING) == 0, 2047ad1e7d28SJulian Elischer ("thread with borrowed priority returning to userland")); 2048ad1e7d28SJulian Elischer if (td->td_priority != td->td_user_pri) { 20497b20fb19SJeff Roberson thread_lock(td); 2050ad1e7d28SJulian Elischer td->td_priority = td->td_user_pri; 2051ad1e7d28SJulian Elischer td->td_base_pri = td->td_user_pri; 205262fa74d9SJeff Roberson if (lowpri_userret) 205362fa74d9SJeff Roberson tdq_setlowpri(TDQ_SELF(), td); 20547b20fb19SJeff Roberson thread_unlock(td); 2055ad1e7d28SJulian Elischer } 205635e6168fSJeff Roberson } 205735e6168fSJeff Roberson 2058ae7a6b38SJeff Roberson /* 2059ae7a6b38SJeff Roberson * Handle a stathz tick. This is really only relevant for timeshare 2060ae7a6b38SJeff Roberson * threads. 2061ae7a6b38SJeff Roberson */ 206235e6168fSJeff Roberson void 20637cf90fb3SJeff Roberson sched_clock(struct thread *td) 206435e6168fSJeff Roberson { 2065ad1e7d28SJulian Elischer struct tdq *tdq; 2066ad1e7d28SJulian Elischer struct td_sched *ts; 206735e6168fSJeff Roberson 2068ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 20693f872f85SJeff Roberson tdq = TDQ_SELF(); 20707fcf154aSJeff Roberson #ifdef SMP 20717fcf154aSJeff Roberson /* 20727fcf154aSJeff Roberson * We run the long term load balancer infrequently on the first cpu. 20737fcf154aSJeff Roberson */ 20747fcf154aSJeff Roberson if (balance_tdq == tdq) { 20757fcf154aSJeff Roberson if (balance_ticks && --balance_ticks == 0) 20767fcf154aSJeff Roberson sched_balance(); 20777fcf154aSJeff Roberson } 20787fcf154aSJeff Roberson #endif 20793f872f85SJeff Roberson /* 20803f872f85SJeff Roberson * Advance the insert index once for each tick to ensure that all 20813f872f85SJeff Roberson * threads get a chance to run. 20823f872f85SJeff Roberson */ 20833f872f85SJeff Roberson if (tdq->tdq_idx == tdq->tdq_ridx) { 20843f872f85SJeff Roberson tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; 20853f872f85SJeff Roberson if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) 20863f872f85SJeff Roberson tdq->tdq_ridx = tdq->tdq_idx; 20873f872f85SJeff Roberson } 20883f872f85SJeff Roberson ts = td->td_sched; 2089fd0b8c78SJeff Roberson if (td->td_pri_class & PRI_FIFO_BIT) 2090a8949de2SJeff Roberson return; 2091fd0b8c78SJeff Roberson if (td->td_pri_class == PRI_TIMESHARE) { 2092a8949de2SJeff Roberson /* 2093fd0b8c78SJeff Roberson * We used a tick; charge it to the thread so 2094fd0b8c78SJeff Roberson * that we can compute our interactivity. 209515dc847eSJeff Roberson */ 2096ae7a6b38SJeff Roberson td->td_sched->ts_runtime += tickincr; 20978460a577SJohn Birrell sched_interact_update(td); 2098fd0b8c78SJeff Roberson } 209935e6168fSJeff Roberson /* 210035e6168fSJeff Roberson * We used up one time slice. 210135e6168fSJeff Roberson */ 2102ad1e7d28SJulian Elischer if (--ts->ts_slice > 0) 210315dc847eSJeff Roberson return; 210435e6168fSJeff Roberson /* 210515dc847eSJeff Roberson * We're out of time, recompute priorities and requeue. 210635e6168fSJeff Roberson */ 21078460a577SJohn Birrell sched_priority(td); 21084a338afdSJulian Elischer td->td_flags |= TDF_NEEDRESCHED; 210935e6168fSJeff Roberson } 211035e6168fSJeff Roberson 2111ae7a6b38SJeff Roberson /* 2112ae7a6b38SJeff Roberson * Called once per hz tick. Used for cpu utilization information. This 2113ae7a6b38SJeff Roberson * is easier than trying to scale based on stathz. 2114ae7a6b38SJeff Roberson */ 2115ae7a6b38SJeff Roberson void 2116ae7a6b38SJeff Roberson sched_tick(void) 2117ae7a6b38SJeff Roberson { 2118ae7a6b38SJeff Roberson struct td_sched *ts; 2119ae7a6b38SJeff Roberson 2120ae7a6b38SJeff Roberson ts = curthread->td_sched; 2121ae7a6b38SJeff Roberson /* Adjust ticks for pctcpu */ 2122ae7a6b38SJeff Roberson ts->ts_ticks += 1 << SCHED_TICK_SHIFT; 2123ae7a6b38SJeff Roberson ts->ts_ltick = ticks; 2124ae7a6b38SJeff Roberson /* 2125ae7a6b38SJeff Roberson * Update if we've exceeded our desired tick threshhold by over one 2126ae7a6b38SJeff Roberson * second. 2127ae7a6b38SJeff Roberson */ 2128ae7a6b38SJeff Roberson if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick) 2129ae7a6b38SJeff Roberson sched_pctcpu_update(ts); 2130ae7a6b38SJeff Roberson } 2131ae7a6b38SJeff Roberson 2132ae7a6b38SJeff Roberson /* 2133ae7a6b38SJeff Roberson * Return whether the current CPU has runnable tasks. Used for in-kernel 2134ae7a6b38SJeff Roberson * cooperative idle threads. 2135ae7a6b38SJeff Roberson */ 213635e6168fSJeff Roberson int 213735e6168fSJeff Roberson sched_runnable(void) 213835e6168fSJeff Roberson { 2139ad1e7d28SJulian Elischer struct tdq *tdq; 2140b90816f1SJeff Roberson int load; 214135e6168fSJeff Roberson 2142b90816f1SJeff Roberson load = 1; 2143b90816f1SJeff Roberson 2144ad1e7d28SJulian Elischer tdq = TDQ_SELF(); 21453f741ca1SJeff Roberson if ((curthread->td_flags & TDF_IDLETD) != 0) { 2146d2ad694cSJeff Roberson if (tdq->tdq_load > 0) 21473f741ca1SJeff Roberson goto out; 21483f741ca1SJeff Roberson } else 2149d2ad694cSJeff Roberson if (tdq->tdq_load - 1 > 0) 2150b90816f1SJeff Roberson goto out; 2151b90816f1SJeff Roberson load = 0; 2152b90816f1SJeff Roberson out: 2153b90816f1SJeff Roberson return (load); 215435e6168fSJeff Roberson } 215535e6168fSJeff Roberson 2156ae7a6b38SJeff Roberson /* 2157ae7a6b38SJeff Roberson * Choose the highest priority thread to run. The thread is removed from 2158ae7a6b38SJeff Roberson * the run-queue while running however the load remains. For SMP we set 2159ae7a6b38SJeff Roberson * the tdq in the global idle bitmask if it idles here. 2160ae7a6b38SJeff Roberson */ 21617a5e5e2aSJeff Roberson struct thread * 2162c9f25d8fSJeff Roberson sched_choose(void) 2163c9f25d8fSJeff Roberson { 2164ae7a6b38SJeff Roberson struct td_sched *ts; 2165ae7a6b38SJeff Roberson struct tdq *tdq; 2166ae7a6b38SJeff Roberson 2167ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2168ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2169ad1e7d28SJulian Elischer ts = tdq_choose(tdq); 2170ad1e7d28SJulian Elischer if (ts) { 2171ad1e7d28SJulian Elischer tdq_runq_rem(tdq, ts); 21727a5e5e2aSJeff Roberson return (ts->ts_thread); 217335e6168fSJeff Roberson } 217462fa74d9SJeff Roberson return (PCPU_GET(idlethread)); 21757a5e5e2aSJeff Roberson } 21767a5e5e2aSJeff Roberson 2177ae7a6b38SJeff Roberson /* 2178ae7a6b38SJeff Roberson * Set owepreempt if necessary. Preemption never happens directly in ULE, 2179ae7a6b38SJeff Roberson * we always request it once we exit a critical section. 2180ae7a6b38SJeff Roberson */ 2181ae7a6b38SJeff Roberson static inline void 2182ae7a6b38SJeff Roberson sched_setpreempt(struct thread *td) 21837a5e5e2aSJeff Roberson { 21847a5e5e2aSJeff Roberson struct thread *ctd; 21857a5e5e2aSJeff Roberson int cpri; 21867a5e5e2aSJeff Roberson int pri; 21877a5e5e2aSJeff Roberson 21887a5e5e2aSJeff Roberson ctd = curthread; 21897a5e5e2aSJeff Roberson pri = td->td_priority; 21907a5e5e2aSJeff Roberson cpri = ctd->td_priority; 219162fa74d9SJeff Roberson if (td->td_priority < cpri) 2192ae7a6b38SJeff Roberson curthread->td_flags |= TDF_NEEDRESCHED; 21937a5e5e2aSJeff Roberson if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd)) 2194ae7a6b38SJeff Roberson return; 21957a5e5e2aSJeff Roberson /* 21967a5e5e2aSJeff Roberson * Always preempt IDLE threads. Otherwise only if the preempting 21977a5e5e2aSJeff Roberson * thread is an ithread. 21987a5e5e2aSJeff Roberson */ 2199ae7a6b38SJeff Roberson if (pri > preempt_thresh && cpri < PRI_MIN_IDLE) 2200ae7a6b38SJeff Roberson return; 22017a5e5e2aSJeff Roberson ctd->td_owepreempt = 1; 2202ae7a6b38SJeff Roberson return; 220335e6168fSJeff Roberson } 220435e6168fSJeff Roberson 2205ae7a6b38SJeff Roberson /* 2206ae7a6b38SJeff Roberson * Add a thread to a thread queue. Initializes priority, slice, runq, and 2207ae7a6b38SJeff Roberson * add it to the appropriate queue. This is the internal function called 2208ae7a6b38SJeff Roberson * when the tdq is predetermined. 2209ae7a6b38SJeff Roberson */ 221035e6168fSJeff Roberson void 2211ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags) 221235e6168fSJeff Roberson { 2213ad1e7d28SJulian Elischer struct td_sched *ts; 221422bf7d9aSJeff Roberson int class; 2215c9f25d8fSJeff Roberson 2216ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 22177a5e5e2aSJeff Roberson KASSERT((td->td_inhibitors == 0), 22187a5e5e2aSJeff Roberson ("sched_add: trying to run inhibited thread")); 22197a5e5e2aSJeff Roberson KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 22207a5e5e2aSJeff Roberson ("sched_add: bad thread state")); 2221b61ce5b0SJeff Roberson KASSERT(td->td_flags & TDF_INMEM, 2222b61ce5b0SJeff Roberson ("sched_add: thread swapped out")); 2223ae7a6b38SJeff Roberson 2224ae7a6b38SJeff Roberson ts = td->td_sched; 22257a5e5e2aSJeff Roberson class = PRI_BASE(td->td_pri_class); 2226ae7a6b38SJeff Roberson TD_SET_RUNQ(td); 22277a5e5e2aSJeff Roberson if (ts->ts_slice == 0) 22287a5e5e2aSJeff Roberson ts->ts_slice = sched_slice; 22292454aaf5SJeff Roberson /* 2230ae7a6b38SJeff Roberson * Pick the run queue based on priority. 22312454aaf5SJeff Roberson */ 2232ae7a6b38SJeff Roberson if (td->td_priority <= PRI_MAX_REALTIME) 2233ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_realtime; 2234ae7a6b38SJeff Roberson else if (td->td_priority <= PRI_MAX_TIMESHARE) 2235ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_timeshare; 22367b8bfa0dSJeff Roberson else 2237ae7a6b38SJeff Roberson ts->ts_runq = &tdq->tdq_idle; 2238ae7a6b38SJeff Roberson if (td->td_priority < tdq->tdq_lowpri) 2239ae7a6b38SJeff Roberson tdq->tdq_lowpri = td->td_priority; 2240ad1e7d28SJulian Elischer tdq_runq_add(tdq, ts, flags); 2241ad1e7d28SJulian Elischer tdq_load_add(tdq, ts); 2242ae7a6b38SJeff Roberson } 2243ae7a6b38SJeff Roberson 2244ae7a6b38SJeff Roberson /* 2245ae7a6b38SJeff Roberson * Select the target thread queue and add a thread to it. Request 2246ae7a6b38SJeff Roberson * preemption or IPI a remote processor if required. 2247ae7a6b38SJeff Roberson */ 2248ae7a6b38SJeff Roberson void 2249ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags) 2250ae7a6b38SJeff Roberson { 2251ae7a6b38SJeff Roberson struct td_sched *ts; 2252ae7a6b38SJeff Roberson struct tdq *tdq; 22537b8bfa0dSJeff Roberson #ifdef SMP 2254ae7a6b38SJeff Roberson int cpuid; 2255ae7a6b38SJeff Roberson int cpu; 2256ae7a6b38SJeff Roberson #endif 2257ae7a6b38SJeff Roberson CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 2258431f8906SJulian Elischer td, td->td_name, td->td_priority, curthread, 2259431f8906SJulian Elischer curthread->td_name); 2260ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2261ae7a6b38SJeff Roberson ts = td->td_sched; 2262ae7a6b38SJeff Roberson /* 2263ae7a6b38SJeff Roberson * Recalculate the priority before we select the target cpu or 2264ae7a6b38SJeff Roberson * run-queue. 2265ae7a6b38SJeff Roberson */ 2266ae7a6b38SJeff Roberson if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 2267ae7a6b38SJeff Roberson sched_priority(td); 2268ae7a6b38SJeff Roberson #ifdef SMP 2269ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 2270ae7a6b38SJeff Roberson /* 2271ae7a6b38SJeff Roberson * Pick the destination cpu and if it isn't ours transfer to the 2272ae7a6b38SJeff Roberson * target cpu. 2273ae7a6b38SJeff Roberson */ 2274ae7a6b38SJeff Roberson cpu = sched_pickcpu(ts, flags); 2275ae7a6b38SJeff Roberson tdq = sched_setcpu(ts, cpu, flags); 2276ae7a6b38SJeff Roberson tdq_add(tdq, td, flags); 2277ae7a6b38SJeff Roberson if (cpu != cpuid) { 22787b8bfa0dSJeff Roberson tdq_notify(ts); 22797b8bfa0dSJeff Roberson return; 22807b8bfa0dSJeff Roberson } 2281ae7a6b38SJeff Roberson #else 2282ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2283ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 2284ae7a6b38SJeff Roberson /* 2285ae7a6b38SJeff Roberson * Now that the thread is moving to the run-queue, set the lock 2286ae7a6b38SJeff Roberson * to the scheduler's lock. 2287ae7a6b38SJeff Roberson */ 2288ae7a6b38SJeff Roberson thread_lock_set(td, TDQ_LOCKPTR(tdq)); 2289ae7a6b38SJeff Roberson tdq_add(tdq, td, flags); 22907b8bfa0dSJeff Roberson #endif 2291ae7a6b38SJeff Roberson if (!(flags & SRQ_YIELDING)) 2292ae7a6b38SJeff Roberson sched_setpreempt(td); 229335e6168fSJeff Roberson } 229435e6168fSJeff Roberson 2295ae7a6b38SJeff Roberson /* 2296ae7a6b38SJeff Roberson * Remove a thread from a run-queue without running it. This is used 2297ae7a6b38SJeff Roberson * when we're stealing a thread from a remote queue. Otherwise all threads 2298ae7a6b38SJeff Roberson * exit by calling sched_exit_thread() and sched_throw() themselves. 2299ae7a6b38SJeff Roberson */ 230035e6168fSJeff Roberson void 23017cf90fb3SJeff Roberson sched_rem(struct thread *td) 230235e6168fSJeff Roberson { 2303ad1e7d28SJulian Elischer struct tdq *tdq; 2304ad1e7d28SJulian Elischer struct td_sched *ts; 23057cf90fb3SJeff Roberson 230681d47d3fSJeff Roberson CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 2307431f8906SJulian Elischer td, td->td_name, td->td_priority, curthread, 2308431f8906SJulian Elischer curthread->td_name); 2309ad1e7d28SJulian Elischer ts = td->td_sched; 2310ae7a6b38SJeff Roberson tdq = TDQ_CPU(ts->ts_cpu); 2311ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2312ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 23137a5e5e2aSJeff Roberson KASSERT(TD_ON_RUNQ(td), 2314ad1e7d28SJulian Elischer ("sched_rem: thread not on run queue")); 2315ad1e7d28SJulian Elischer tdq_runq_rem(tdq, ts); 2316ad1e7d28SJulian Elischer tdq_load_rem(tdq, ts); 23177a5e5e2aSJeff Roberson TD_SET_CAN_RUN(td); 231862fa74d9SJeff Roberson if (td->td_priority == tdq->tdq_lowpri) 231962fa74d9SJeff Roberson tdq_setlowpri(tdq, NULL); 232035e6168fSJeff Roberson } 232135e6168fSJeff Roberson 2322ae7a6b38SJeff Roberson /* 2323ae7a6b38SJeff Roberson * Fetch cpu utilization information. Updates on demand. 2324ae7a6b38SJeff Roberson */ 232535e6168fSJeff Roberson fixpt_t 23267cf90fb3SJeff Roberson sched_pctcpu(struct thread *td) 232735e6168fSJeff Roberson { 232835e6168fSJeff Roberson fixpt_t pctcpu; 2329ad1e7d28SJulian Elischer struct td_sched *ts; 233035e6168fSJeff Roberson 233135e6168fSJeff Roberson pctcpu = 0; 2332ad1e7d28SJulian Elischer ts = td->td_sched; 2333ad1e7d28SJulian Elischer if (ts == NULL) 2334484288deSJeff Roberson return (0); 233535e6168fSJeff Roberson 23367b20fb19SJeff Roberson thread_lock(td); 2337ad1e7d28SJulian Elischer if (ts->ts_ticks) { 233835e6168fSJeff Roberson int rtick; 233935e6168fSJeff Roberson 2340ad1e7d28SJulian Elischer sched_pctcpu_update(ts); 234135e6168fSJeff Roberson /* How many rtick per second ? */ 2342e7d50326SJeff Roberson rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 2343e7d50326SJeff Roberson pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 234435e6168fSJeff Roberson } 23457b20fb19SJeff Roberson thread_unlock(td); 234635e6168fSJeff Roberson 234735e6168fSJeff Roberson return (pctcpu); 234835e6168fSJeff Roberson } 234935e6168fSJeff Roberson 235062fa74d9SJeff Roberson /* 235162fa74d9SJeff Roberson * Enforce affinity settings for a thread. Called after adjustments to 235262fa74d9SJeff Roberson * cpumask. 235362fa74d9SJeff Roberson */ 2354885d51a3SJeff Roberson void 2355885d51a3SJeff Roberson sched_affinity(struct thread *td) 2356885d51a3SJeff Roberson { 235762fa74d9SJeff Roberson #ifdef SMP 235862fa74d9SJeff Roberson struct td_sched *ts; 235962fa74d9SJeff Roberson int cpu; 236062fa74d9SJeff Roberson 236162fa74d9SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 236262fa74d9SJeff Roberson ts = td->td_sched; 236362fa74d9SJeff Roberson if (THREAD_CAN_SCHED(td, ts->ts_cpu)) 236462fa74d9SJeff Roberson return; 236562fa74d9SJeff Roberson if (!TD_IS_RUNNING(td)) 236662fa74d9SJeff Roberson return; 236762fa74d9SJeff Roberson td->td_flags |= TDF_NEEDRESCHED; 236862fa74d9SJeff Roberson if (!THREAD_CAN_MIGRATE(td)) 236962fa74d9SJeff Roberson return; 237062fa74d9SJeff Roberson /* 237162fa74d9SJeff Roberson * Assign the new cpu and force a switch before returning to 237262fa74d9SJeff Roberson * userspace. If the target thread is not running locally send 237362fa74d9SJeff Roberson * an ipi to force the issue. 237462fa74d9SJeff Roberson */ 237562fa74d9SJeff Roberson cpu = ts->ts_cpu; 237662fa74d9SJeff Roberson ts->ts_cpu = sched_pickcpu(ts, 0); 237762fa74d9SJeff Roberson if (cpu != PCPU_GET(cpuid)) 237862fa74d9SJeff Roberson ipi_selected(1 << cpu, IPI_PREEMPT); 237962fa74d9SJeff Roberson #endif 2380885d51a3SJeff Roberson } 2381885d51a3SJeff Roberson 2382ae7a6b38SJeff Roberson /* 2383ae7a6b38SJeff Roberson * Bind a thread to a target cpu. 2384ae7a6b38SJeff Roberson */ 23859bacd788SJeff Roberson void 23869bacd788SJeff Roberson sched_bind(struct thread *td, int cpu) 23879bacd788SJeff Roberson { 2388ad1e7d28SJulian Elischer struct td_sched *ts; 23899bacd788SJeff Roberson 2390c47f202bSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); 2391ad1e7d28SJulian Elischer ts = td->td_sched; 23926b2f763fSJeff Roberson if (ts->ts_flags & TSF_BOUND) 2393c95d2db2SJeff Roberson sched_unbind(td); 2394ad1e7d28SJulian Elischer ts->ts_flags |= TSF_BOUND; 23956b2f763fSJeff Roberson sched_pin(); 239680f86c9fSJeff Roberson if (PCPU_GET(cpuid) == cpu) 23979bacd788SJeff Roberson return; 23986b2f763fSJeff Roberson ts->ts_cpu = cpu; 23999bacd788SJeff Roberson /* When we return from mi_switch we'll be on the correct cpu. */ 2400279f949eSPoul-Henning Kamp mi_switch(SW_VOL, NULL); 24019bacd788SJeff Roberson } 24029bacd788SJeff Roberson 2403ae7a6b38SJeff Roberson /* 2404ae7a6b38SJeff Roberson * Release a bound thread. 2405ae7a6b38SJeff Roberson */ 24069bacd788SJeff Roberson void 24079bacd788SJeff Roberson sched_unbind(struct thread *td) 24089bacd788SJeff Roberson { 2409e7d50326SJeff Roberson struct td_sched *ts; 2410e7d50326SJeff Roberson 24117b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2412e7d50326SJeff Roberson ts = td->td_sched; 24136b2f763fSJeff Roberson if ((ts->ts_flags & TSF_BOUND) == 0) 24146b2f763fSJeff Roberson return; 2415e7d50326SJeff Roberson ts->ts_flags &= ~TSF_BOUND; 2416e7d50326SJeff Roberson sched_unpin(); 24179bacd788SJeff Roberson } 24189bacd788SJeff Roberson 241935e6168fSJeff Roberson int 2420ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td) 2421ebccf1e3SJoseph Koshy { 24227b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2423ad1e7d28SJulian Elischer return (td->td_sched->ts_flags & TSF_BOUND); 2424ebccf1e3SJoseph Koshy } 2425ebccf1e3SJoseph Koshy 2426ae7a6b38SJeff Roberson /* 2427ae7a6b38SJeff Roberson * Basic yield call. 2428ae7a6b38SJeff Roberson */ 242936ec198bSDavid Xu void 243036ec198bSDavid Xu sched_relinquish(struct thread *td) 243136ec198bSDavid Xu { 24327b20fb19SJeff Roberson thread_lock(td); 24337b20fb19SJeff Roberson SCHED_STAT_INC(switch_relinquish); 243436ec198bSDavid Xu mi_switch(SW_VOL, NULL); 24357b20fb19SJeff Roberson thread_unlock(td); 243636ec198bSDavid Xu } 243736ec198bSDavid Xu 2438ae7a6b38SJeff Roberson /* 2439ae7a6b38SJeff Roberson * Return the total system load. 2440ae7a6b38SJeff Roberson */ 2441ebccf1e3SJoseph Koshy int 244233916c36SJeff Roberson sched_load(void) 244333916c36SJeff Roberson { 244433916c36SJeff Roberson #ifdef SMP 244533916c36SJeff Roberson int total; 244633916c36SJeff Roberson int i; 244733916c36SJeff Roberson 244833916c36SJeff Roberson total = 0; 244962fa74d9SJeff Roberson for (i = 0; i <= mp_maxid; i++) 245062fa74d9SJeff Roberson total += TDQ_CPU(i)->tdq_sysload; 245133916c36SJeff Roberson return (total); 245233916c36SJeff Roberson #else 2453d2ad694cSJeff Roberson return (TDQ_SELF()->tdq_sysload); 245433916c36SJeff Roberson #endif 245533916c36SJeff Roberson } 245633916c36SJeff Roberson 245733916c36SJeff Roberson int 245835e6168fSJeff Roberson sched_sizeof_proc(void) 245935e6168fSJeff Roberson { 246035e6168fSJeff Roberson return (sizeof(struct proc)); 246135e6168fSJeff Roberson } 246235e6168fSJeff Roberson 246335e6168fSJeff Roberson int 246435e6168fSJeff Roberson sched_sizeof_thread(void) 246535e6168fSJeff Roberson { 246635e6168fSJeff Roberson return (sizeof(struct thread) + sizeof(struct td_sched)); 246735e6168fSJeff Roberson } 2468b41f1452SDavid Xu 24697a5e5e2aSJeff Roberson /* 24707a5e5e2aSJeff Roberson * The actual idle process. 24717a5e5e2aSJeff Roberson */ 24727a5e5e2aSJeff Roberson void 24737a5e5e2aSJeff Roberson sched_idletd(void *dummy) 24747a5e5e2aSJeff Roberson { 24757a5e5e2aSJeff Roberson struct thread *td; 2476ae7a6b38SJeff Roberson struct tdq *tdq; 24777a5e5e2aSJeff Roberson 24787a5e5e2aSJeff Roberson td = curthread; 2479ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 24807a5e5e2aSJeff Roberson mtx_assert(&Giant, MA_NOTOWNED); 2481ae7a6b38SJeff Roberson /* ULE relies on preemption for idle interruption. */ 2482ae7a6b38SJeff Roberson for (;;) { 2483ae7a6b38SJeff Roberson #ifdef SMP 2484ae7a6b38SJeff Roberson if (tdq_idled(tdq)) 24857a5e5e2aSJeff Roberson cpu_idle(); 2486ae7a6b38SJeff Roberson #else 2487ae7a6b38SJeff Roberson cpu_idle(); 2488ae7a6b38SJeff Roberson #endif 2489ae7a6b38SJeff Roberson } 2490b41f1452SDavid Xu } 2491e7d50326SJeff Roberson 24927b20fb19SJeff Roberson /* 24937b20fb19SJeff Roberson * A CPU is entering for the first time or a thread is exiting. 24947b20fb19SJeff Roberson */ 24957b20fb19SJeff Roberson void 24967b20fb19SJeff Roberson sched_throw(struct thread *td) 24977b20fb19SJeff Roberson { 249859c68134SJeff Roberson struct thread *newtd; 2499ae7a6b38SJeff Roberson struct tdq *tdq; 2500ae7a6b38SJeff Roberson 2501ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 25027b20fb19SJeff Roberson if (td == NULL) { 2503ae7a6b38SJeff Roberson /* Correct spinlock nesting and acquire the correct lock. */ 2504ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 25057b20fb19SJeff Roberson spinlock_exit(); 25067b20fb19SJeff Roberson } else { 2507ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2508ae7a6b38SJeff Roberson tdq_load_rem(tdq, td->td_sched); 2509eea4f254SJeff Roberson lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object); 25107b20fb19SJeff Roberson } 25117b20fb19SJeff Roberson KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); 251259c68134SJeff Roberson newtd = choosethread(); 251359c68134SJeff Roberson TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd; 25147b20fb19SJeff Roberson PCPU_SET(switchtime, cpu_ticks()); 25157b20fb19SJeff Roberson PCPU_SET(switchticks, ticks); 251659c68134SJeff Roberson cpu_throw(td, newtd); /* doesn't return */ 25177b20fb19SJeff Roberson } 25187b20fb19SJeff Roberson 2519ae7a6b38SJeff Roberson /* 2520ae7a6b38SJeff Roberson * This is called from fork_exit(). Just acquire the correct locks and 2521ae7a6b38SJeff Roberson * let fork do the rest of the work. 2522ae7a6b38SJeff Roberson */ 25237b20fb19SJeff Roberson void 2524fe54587fSJeff Roberson sched_fork_exit(struct thread *td) 25257b20fb19SJeff Roberson { 2526ae7a6b38SJeff Roberson struct td_sched *ts; 2527ae7a6b38SJeff Roberson struct tdq *tdq; 2528ae7a6b38SJeff Roberson int cpuid; 25297b20fb19SJeff Roberson 25307b20fb19SJeff Roberson /* 25317b20fb19SJeff Roberson * Finish setting up thread glue so that it begins execution in a 2532ae7a6b38SJeff Roberson * non-nested critical section with the scheduler lock held. 25337b20fb19SJeff Roberson */ 2534ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 2535ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpuid); 2536ae7a6b38SJeff Roberson ts = td->td_sched; 2537ae7a6b38SJeff Roberson if (TD_IS_IDLETHREAD(td)) 2538ae7a6b38SJeff Roberson td->td_lock = TDQ_LOCKPTR(tdq); 2539ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 2540ae7a6b38SJeff Roberson td->td_oncpu = cpuid; 254159c68134SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 2542eea4f254SJeff Roberson lock_profile_obtain_lock_success( 2543eea4f254SJeff Roberson &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__); 254462fa74d9SJeff Roberson tdq->tdq_lowpri = td->td_priority; 25457b20fb19SJeff Roberson } 25467b20fb19SJeff Roberson 2547ae7a6b38SJeff Roberson static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, 2548ae7a6b38SJeff Roberson "Scheduler"); 2549ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0, 2550e7d50326SJeff Roberson "Scheduler name"); 2551ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, 2552ae7a6b38SJeff Roberson "Slice size for timeshare threads"); 2553ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, 2554ae7a6b38SJeff Roberson "Interactivity score threshold"); 2555ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh, 2556ae7a6b38SJeff Roberson 0,"Min priority for preemption, lower priorities have greater precedence"); 25577b8bfa0dSJeff Roberson #ifdef SMP 2558ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0, 2559ae7a6b38SJeff Roberson "Pick the target cpu based on priority rather than load."); 2560ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0, 2561ae7a6b38SJeff Roberson "Number of hz ticks to keep thread affinity for"); 2562ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, ""); 256362fa74d9SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, userret, CTLFLAG_RW, &lowpri_userret, 0, ""); 256462fa74d9SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, oldtryself, CTLFLAG_RW, &oldtryself, 0, ""); 2565ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, 2566ae7a6b38SJeff Roberson "Enables the long-term load balancer"); 25677fcf154aSJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW, 25687fcf154aSJeff Roberson &balance_interval, 0, 25697fcf154aSJeff Roberson "Average frequency in stathz ticks to run the long-term balancer"); 2570ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, 2571ae7a6b38SJeff Roberson "Steals work from another hyper-threaded core on idle"); 2572ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0, 2573ae7a6b38SJeff Roberson "Attempts to steal work from other cores before idling"); 257428994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0, 257528994a58SJeff Roberson "Minimum load on remote cpu before we'll steal"); 25767b8bfa0dSJeff Roberson #endif 2577e7d50326SJeff Roberson 257854b0e65fSJeff Roberson /* ps compat. All cpu percentages from ULE are weighted. */ 2579a5423ea3SJeff Roberson static int ccpu = 0; 2580e7d50326SJeff Roberson SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 2581e7d50326SJeff Roberson 2582e7d50326SJeff Roberson 2583ed062c8dSJulian Elischer #define KERN_SWITCH_INCLUDE 1 2584ed062c8dSJulian Elischer #include "kern/kern_switch.c" 2585