/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file implements the ULE scheduler.  ULE supports independent CPU
 * run queues and fine grain locking.  It has superior interactive
 * performance under load even on uni-processor systems.
 *
 * etymology:
 *   ULE is the last three letters in schedule.  It owes its name to a
 * generic user created for a scheduling system by Paul Mikesell at
 * Isilon Systems and a general lack of creativity on the part of the author.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtxvar.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#include <sys/sbuf.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
int __read_mostly dtrace_vtime_active;
dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define	KTR_ULE	0

#define	TS_NAME_LEN	(MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
#define	TDQ_NAME_LEN	(sizeof("sched lock ") + sizeof(__XSTRING(MAXCPU)))
#define	TDQ_LOADNAME_LEN	(sizeof("CPU ") + sizeof(__XSTRING(MAXCPU)) - 1 + sizeof(" load"))

/*
 * Thread scheduler specific section.  All fields are protected
 * by the thread lock.
 */
struct td_sched {
	struct runq	*ts_runq;	/* Run-queue we're queued on. */
	short		ts_flags;	/* TSF_* flags. */
	int		ts_cpu;		/* CPU that we have affinity for. */
	int		ts_rltick;	/* Real last tick, for affinity. */
	int		ts_slice;	/* Ticks of slice remaining. */
	u_int		ts_slptime;	/* Number of ticks we vol. slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
#ifdef KTR
	char		ts_name[TS_NAME_LEN];
#endif
};
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread can not migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

#define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

_Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
    sizeof(struct thread0_storage),
    "increase struct thread0_storage.t0st_sched size");

/*
 * Priority ranges used for interactive and non-interactive timeshare
 * threads.  The timeshare priorities are split up into four ranges.
 * The first range handles interactive threads.  The last three ranges
 * (NHALF, x, and NHALF) handle non-interactive threads with the outer
 * ranges supporting nice values.
 */
#define	PRI_TIMESHARE_RANGE	(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	PRI_INTERACT_RANGE	((PRI_TIMESHARE_RANGE - SCHED_PRI_NRESV) / 2)
#define	PRI_BATCH_RANGE		(PRI_TIMESHARE_RANGE - PRI_INTERACT_RANGE)

#define	PRI_MIN_INTERACT	PRI_MIN_TIMESHARE
#define	PRI_MAX_INTERACT	(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE - 1)
#define	PRI_MIN_BATCH		(PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE)
#define	PRI_MAX_BATCH		PRI_MAX_TIMESHARE

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
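
/*
 * Worked example of the macros above, assuming hz = 1000: ts_ticks
 * accumulates hz ticks scaled up by SCHED_TICK_SHIFT, so a thread that
 * was on cpu for 2500 of the last 10000 hz ticks has
 * SCHED_TICK_HZ(ts) ~= 2500 and SCHED_TICK_TOTAL(ts) ~= 10000, i.e.
 * roughly 25% cpu over the SCHED_TICK_SECS averaging window.
 */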

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_BATCH + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_BATCH - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)
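
/*
 * For instance, using the defaults above and assuming hz = 1000: a
 * batch thread that consumed about half of the averaging window has
 * SCHED_TICK_HZ(ts) ~= SCHED_TICK_TOTAL(ts) / 2, so SCHED_PRI_TICKS()
 * evaluates to roughly SCHED_PRI_RANGE / 2 and the thread lands near
 * the middle of the utilization dependent range, before the
 * SCHED_PRI_NICE() nice adjustment is folded in (by sched_priority(),
 * not shown in this excerpt).
 */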

/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters determine the slice behavior for batch work.
 */
#define	SCHED_SLICE_DEFAULT_DIVISOR	10	/* ~94 ms, 12 stathz ticks. */
#define	SCHED_SLICE_MIN_DIVISOR		6	/* DEFAULT/MIN = ~16 ms. */

/* Flags kept in td_flags. */
#define	TDF_PICKCPU	TDF_SCHED0	/* Thread should pick new CPU. */
#define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */

/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
 */
static u_int __read_mostly sched_interact = SCHED_INTERACT_THRESH;
static int __read_mostly tickincr = 8 << SCHED_TICK_SHIFT;
static int __read_mostly realstathz = 127;	/* reset during boot. */
static int __read_mostly sched_slice = 10;	/* reset during boot. */
static int __read_mostly sched_slice_min = 1;	/* reset during boot. */
#ifdef PREEMPTION
#ifdef FULL_PREEMPTION
static int __read_mostly preempt_thresh = PRI_MAX_IDLE;
#else
static int __read_mostly preempt_thresh = PRI_MIN_KERN;
#endif
#else
static int __read_mostly preempt_thresh = 0;
#endif
static int __read_mostly static_boost = PRI_MIN_BATCH;
static int __read_mostly sched_idlespins = 10000;
static int __read_mostly sched_idlespinthresh = -1;

/*
 * tdq - per processor runqs and statistics.  All fields are protected by the
 * tdq_lock.  The load and lowpri may be accessed without the lock to avoid
 * excess locking in sched_pickcpu().
 */
struct tdq {
	/*
	 * Ordered to improve efficiency of cpu_search() and switch().
	 * tdq_lock is padded to avoid false sharing with tdq_load and
	 * tdq_cpu_idle.
	 */
	struct mtx_padalign tdq_lock;		/* run queue lock. */
	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
	struct thread	*tdq_curthread;		/* Current executing thread. */
	volatile int	tdq_load;		/* Aggregate load. */
	volatile int	tdq_cpu_idle;		/* cpu_idle() is active. */
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
	volatile int	tdq_transferable;	/* Transferable thread count. */
	volatile short	tdq_switchcnt;		/* Switches this tick. */
	volatile short	tdq_oldswitchcnt;	/* Switches last tick. */
	u_char		tdq_lowpri;		/* Lowest priority thread. */
	u_char		tdq_owepreempt;		/* Remote preemption pending. */
	u_char		tdq_idx;		/* Current insert index. */
	u_char		tdq_ridx;		/* Current removal index. */
	int		tdq_id;			/* cpuid. */
	struct runq	tdq_realtime;		/* real-time run queue. */
	struct runq	tdq_timeshare;		/* timeshare run queue. */
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	char		tdq_name[TDQ_NAME_LEN];
#ifdef KTR
	char		tdq_loadname[TDQ_LOADNAME_LEN];
#endif
} __aligned(64);

/* Idle thread states and config. */
#define	TDQ_RUNNING	1
#define	TDQ_IDLE	2

#ifdef SMP
struct cpu_group __read_mostly *cpu_top;	/* CPU topology */

#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
#define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))

/*
 * Run-time tunables.
 */
static int rebalance = 1;
static int balance_interval = 128;	/* Default set in sched_initticks(). */
static int __read_mostly affinity;
static int __read_mostly steal_idle = 1;
static int __read_mostly steal_thresh = 2;
static int __read_mostly always_steal = 0;
static int __read_mostly trysteal_limit = 2;

/*
 * One thread queue per processor.
 */
static struct tdq __read_mostly *balance_tdq;
static int balance_ticks;
DPCPU_DEFINE_STATIC(struct tdq, tdq);
DPCPU_DEFINE_STATIC(uint32_t, randomval);

#define	TDQ_SELF()	((struct tdq *)PCPU_GET(sched))
#define	TDQ_CPU(x)	(DPCPU_ID_PTR((x), tdq))
#define	TDQ_ID(x)	((x)->tdq_id)
#else	/* !SMP */
static struct tdq	tdq_cpu;

#define	TDQ_ID(x)	(0)
#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
#endif

#define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
#define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define	TDQ_TRYLOCK(t)		mtx_trylock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_TRYLOCK_FLAGS(t, f)	mtx_trylock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCKPTR(t)		((struct mtx *)(&(t)->tdq_lock))
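
/*
 * A minimal usage sketch of the locking macros above (hypothetical
 * caller, shown only for illustration):
 *
 *	struct tdq *tdq = TDQ_SELF();
 *
 *	TDQ_LOCK(tdq);
 *	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 *	... touch tdq_load, tdq_realtime, tdq_timeshare, ...
 *	TDQ_UNLOCK(tdq);
 */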

static void sched_setpreempt(int);
static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *, int);

/* Operations on per processor queues */
static struct thread *tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *, int i);
static void tdq_load_add(struct tdq *, struct thread *);
static void tdq_load_rem(struct tdq *, struct thread *);
static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
static __inline void tdq_runq_rem(struct tdq *, struct thread *);
static inline int sched_shouldpreempt(int, int, int);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
static int tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static int tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct tdq *, int lowpri);
static struct thread *tdq_steal(struct tdq *, int);
static struct thread *runq_steal(struct runq *, int);
static int sched_pickcpu(struct thread *, int);
static void sched_balance(void);
static bool sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct thread *, int, int);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb,
    struct cpu_group *cg, int indent);
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
    NULL);

SDT_PROVIDER_DEFINE(sched);

SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
    "struct proc *", "uint8_t");
SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *",
    "struct proc *", "void *");
SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *",
    "struct proc *", "void *", "int");
SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *",
    "struct proc *", "uint8_t", "struct thread *");
SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int");
SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *",
    "struct proc *");
SDT_PROBE_DEFINE(sched, , , on__cpu);
SDT_PROBE_DEFINE(sched, , , remain__cpu);
SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
    "struct proc *");

/*
 * Print the threads waiting on a run-queue.
 */
static void
runq_print(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;
	int j;
	int i;

	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(td, rqh, td_runq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    td, td->td_name, td->td_priority,
					    td->td_rqindex, pri);
				}
			}
	}
}

/*
 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
 */
void
tdq_print(int cpu)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);

	printf("tdq %d:\n", TDQ_ID(tdq));
	printf("\tlock %p\n", TDQ_LOCKPTR(tdq));
	printf("\tLock name: %s\n", tdq->tdq_name);
	printf("\tload: %d\n", tdq->tdq_load);
	printf("\tswitch cnt: %d\n", tdq->tdq_switchcnt);
	printf("\told switch cnt: %d\n", tdq->tdq_oldswitchcnt);
	printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
	printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
	printf("\trealtime runq:\n");
	runq_print(&tdq->tdq_realtime);
	printf("\ttimeshare runq:\n");
	runq_print(&tdq->tdq_timeshare);
	printf("\tidle runq:\n");
	runq_print(&tdq->tdq_idle);
}

static inline int
sched_shouldpreempt(int pri, int cpri, int remote)
{
	/*
	 * If the new priority is not better than the current priority there is
	 * nothing to do.
	 */
	if (pri >= cpri)
		return (0);
	/*
	 * Always preempt idle.
	 */
	if (cpri >= PRI_MIN_IDLE)
		return (1);
	/*
	 * If preemption is disabled don't preempt others.
	 */
	if (preempt_thresh == 0)
		return (0);
	/*
	 * Preempt if we exceed the threshold.
	 */
	if (pri <= preempt_thresh)
		return (1);
	/*
	 * If we're interactive or better and there is non-interactive
	 * or worse running preempt only remote processors.
	 */
	if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
		return (1);
	return (0);
}
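
/*
 * Illustration of the policy above, using the defaults declared
 * earlier (preempt_thresh == PRI_MIN_KERN when only PREEMPTION is
 * configured): any thread preempts the idle thread; a thread whose
 * priority is at or better than preempt_thresh (pri <= preempt_thresh)
 * preempts a worse running thread immediately; and an interactive
 * timeshare thread (pri <= PRI_MAX_INTERACT) displaces a batch thread
 * (cpri > PRI_MAX_INTERACT) only when "remote" is set, i.e. on another
 * cpu.
 */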

/*
 * Add a thread to the actual run-queue.  Keeps transferable counts up to
 * date with what is actually on the run-queue.  Selects the correct
 * queue position for timeshare threads.
 */
static __inline void
tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
{
	struct td_sched *ts;
	u_char pri;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);

	pri = td->td_priority;
	ts = td_get_sched(td);
	TD_SET_RUNQ(td);
	if (THREAD_CAN_MIGRATE(td)) {
		tdq->tdq_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
	}
	if (pri < PRI_MIN_BATCH) {
		ts->ts_runq = &tdq->tdq_realtime;
	} else if (pri <= PRI_MAX_BATCH) {
		ts->ts_runq = &tdq->tdq_timeshare;
		KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH,
		    ("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between MIN and MAX
		 * batch.  Use the whole queue to represent these values.
		 */
		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
			pri = RQ_NQS * (pri - PRI_MIN_BATCH) / PRI_BATCH_RANGE;
			pri = (pri + tdq->tdq_idx) % RQ_NQS;
			/*
			 * This effectively shortens the queue by one so we
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (unsigned char)(pri - 1) % RQ_NQS;
		} else
			pri = tdq->tdq_ridx;
		runq_add_pri(ts->ts_runq, td, pri, flags);
		return;
	} else
		ts->ts_runq = &tdq->tdq_idle;
	runq_add(ts->ts_runq, td, flags);
}
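
/*
 * For example (illustrative numbers): a batch thread whose priority
 * lies a quarter of the way into the batch range is inserted about
 * RQ_NQS / 4 slots ahead of tdq_idx, modulo RQ_NQS.  Since tdq_idx
 * advances as queues drain and tdq_ridx trails it, lower priority
 * batch threads are queued further in the "future" and therefore run
 * less frequently.
 */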

/*
 * Remove a thread from a run-queue.  This typically happens when a thread
 * is selected to run.  Running threads are not on the queue and the
 * transferable count does not reflect them.
 */
static __inline void
tdq_runq_rem(struct tdq *tdq, struct thread *td)
{
	struct td_sched *ts;

	ts = td_get_sched(td);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
	KASSERT(ts->ts_runq != NULL,
	    ("tdq_runq_remove: thread %p null ts_runq", td));
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
	}
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
		else
			runq_remove_idx(ts->ts_runq, td, NULL);
	} else
		runq_remove(ts->ts_runq, td);
}

/*
 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
 * for this thread to the referenced thread queue.
 */
static void
tdq_load_add(struct tdq *tdq, struct thread *td)
{

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);

	tdq->tdq_load++;
	if ((td->td_flags & TDF_NOLOAD) == 0)
		tdq->tdq_sysload++;
	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
}

/*
 * Remove the load from a thread that is transitioning to a sleep state or
 * exiting.
 */
static void
tdq_load_rem(struct tdq *tdq, struct thread *td)
{

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED);
	KASSERT(tdq->tdq_load != 0,
	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));

	tdq->tdq_load--;
	if ((td->td_flags & TDF_NOLOAD) == 0)
		tdq->tdq_sysload--;
	KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
	SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
}

/*
 * Bound timeshare latency by decreasing slice size as load increases.  We
 * consider the maximum latency as the sum of the threads waiting to run
 * aside from curthread and target no more than sched_slice latency but
 * no less than sched_slice_min runtime.
 */
static inline int
tdq_slice(struct tdq *tdq)
{
	int load;

	/*
	 * It is safe to use sys_load here because this is called from
	 * contexts where timeshare threads are running and so there
	 * cannot be higher priority load in the system.
	 */
	load = tdq->tdq_sysload - 1;
	if (load >= SCHED_SLICE_MIN_DIVISOR)
		return (sched_slice_min);
	if (load <= 1)
		return (sched_slice);
	return (sched_slice / load);
}
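
/*
 * For example, with the boot-time defaults described above (roughly 12
 * stathz ticks per sched_slice): a lone batch thread receives the full
 * sched_slice, one competing with three other runnable threads gets
 * about sched_slice / 3, and once SCHED_SLICE_MIN_DIVISOR or more are
 * waiting every thread is clamped to sched_slice_min.
 */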

/*
 * Set lowpri to its exact value by searching the run-queue and
 * evaluating curthread.  curthread may be passed as an optimization.
 */
static void
tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if (ctd == NULL)
		ctd = atomic_load_ptr(&tdq->tdq_curthread);
	td = tdq_choose(tdq);
	if (td == NULL || td->td_priority > ctd->td_priority)
		tdq->tdq_lowpri = ctd->td_priority;
	else
		tdq->tdq_lowpri = td->td_priority;
}

#ifdef SMP
/*
 * We need some randomness. Implement a classic Linear Congruential
 * Generator X_{n+1}=(aX_n+c) mod m. These values are optimized for
 * m = 2^32, a = 69069 and c = 5. We only return the upper 16 bits
 * of the random state (in the low bits of our answer) to keep
 * the maximum randomness.
 */
static uint32_t
sched_random(void)
{
	uint32_t *rndptr;

	rndptr = DPCPU_PTR(randomval);
	*rndptr = *rndptr * 69069 + 5;

	return (*rndptr >> 16);
}
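
/*
 * Usage sketch: the callers below only use this to perturb otherwise
 * deterministic choices, e.g. the per-pass jitter in sched_balance()
 *
 *	balance_ticks = max(balance_interval / 2, 1) +
 *	    (sched_random() % balance_interval);
 *
 * and the small random penalty subtracted from candidate loads in the
 * cpu_search_*() functions, so speed matters more than statistical
 * quality here.
 */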

struct cpu_search {
	cpuset_t *cs_mask;	/* The mask of allowed CPUs to choose from. */
	int	cs_prefer;	/* Prefer this CPU and groups including it. */
	int	cs_running;	/* The thread is now running at cs_prefer. */
	int	cs_pri;		/* Min priority for low. */
	int	cs_load;	/* Max load for low, min load for high. */
	int	cs_trans;	/* Min transferable load for high. */
};

struct cpu_search_res {
	int	csr_cpu;	/* The best CPU found. */
	int	csr_load;	/* The load of csr_cpu. */
};

/*
 * Search the tree of cpu_groups for the lowest or highest loaded CPU.
 * These routines actually compare the load on all paths through the tree
 * and find the least loaded cpu on the least loaded path, which may differ
 * from the least loaded cpu in the system.  This balances work among caches
 * and buses.
 */
static int
cpu_search_lowest(const struct cpu_group *cg, const struct cpu_search *s,
    struct cpu_search_res *r)
{
	struct cpu_search_res lr;
	struct tdq *tdq;
	int c, bload, l, load, p, total;

	total = 0;
	bload = INT_MAX;
	r->csr_cpu = -1;

	/* Loop through children CPU groups if there are any. */
	if (cg->cg_children > 0) {
		for (c = cg->cg_children - 1; c >= 0; c--) {
			load = cpu_search_lowest(&cg->cg_child[c], s, &lr);
			total += load;

			/*
			 * When balancing do not prefer SMT groups with load > 1.
			 * It allows round-robin between SMT groups with equal
			 * load within parent group for more fair scheduling.
			 */
			if (__predict_false(s->cs_running) &&
			    (cg->cg_child[c].cg_flags & CG_FLAG_THREAD) &&
			    load >= 128 && (load & 128) != 0)
				load += 128;

			if (lr.csr_cpu >= 0 && (load < bload ||
			    (load == bload && lr.csr_load < r->csr_load))) {
				bload = load;
				r->csr_cpu = lr.csr_cpu;
				r->csr_load = lr.csr_load;
			}
		}
		return (total);
	}

	/* Loop through children CPUs otherwise. */
	for (c = cg->cg_last; c >= cg->cg_first; c--) {
		if (!CPU_ISSET(c, &cg->cg_mask))
			continue;
		tdq = TDQ_CPU(c);
		l = tdq->tdq_load;
		if (c == s->cs_prefer) {
			if (__predict_false(s->cs_running))
				l--;
			p = 128;
		} else
			p = 0;
		load = l * 256;
		total += load - p;

		/*
		 * Check this CPU is acceptable.
		 * If the thread is already on the CPU, don't look at the TDQ
		 * priority, since it can be the priority of the thread itself.
		 */
		if (l > s->cs_load || (tdq->tdq_lowpri <= s->cs_pri &&
		    (!s->cs_running || c != s->cs_prefer)) ||
		    !CPU_ISSET(c, s->cs_mask))
			continue;

		/*
		 * When balancing do not prefer CPUs with load > 1.
		 * It allows round-robin between CPUs with equal load
		 * within the CPU group for more fair scheduling.
		 */
		if (__predict_false(s->cs_running) && l > 0)
			p = 0;

		load -= sched_random() % 128;
		if (bload > load - p) {
			bload = load - p;
			r->csr_cpu = c;
			r->csr_load = load;
		}
	}
	return (total);
}

static int
cpu_search_highest(const struct cpu_group *cg, const struct cpu_search *s,
    struct cpu_search_res *r)
{
	struct cpu_search_res lr;
	struct tdq *tdq;
	int c, bload, l, load, total;

	total = 0;
	bload = INT_MIN;
	r->csr_cpu = -1;

	/* Loop through children CPU groups if there are any. */
	if (cg->cg_children > 0) {
		for (c = cg->cg_children - 1; c >= 0; c--) {
			load = cpu_search_highest(&cg->cg_child[c], s, &lr);
			total += load;
			if (lr.csr_cpu >= 0 && (load > bload ||
			    (load == bload && lr.csr_load > r->csr_load))) {
				bload = load;
				r->csr_cpu = lr.csr_cpu;
				r->csr_load = lr.csr_load;
			}
		}
		return (total);
	}

	/* Loop through children CPUs otherwise. */
	for (c = cg->cg_last; c >= cg->cg_first; c--) {
		if (!CPU_ISSET(c, &cg->cg_mask))
			continue;
		tdq = TDQ_CPU(c);
		l = tdq->tdq_load;
		load = l * 256;
		total += load;

		/*
		 * Check this CPU is acceptable.
		 */
		if (l < s->cs_load || (tdq->tdq_transferable < s->cs_trans) ||
		    !CPU_ISSET(c, s->cs_mask))
			continue;

		load -= sched_random() % 256;
		if (load > bload) {
			bload = load;
			r->csr_cpu = c;
		}
	}
	r->csr_load = bload;
	return (total);
}

/*
 * Find the cpu with the least load via the least loaded path that has a
 * lowpri greater than pri.  A pri of -1 indicates any priority is
 * acceptable.
 */
static inline int
sched_lowest(const struct cpu_group *cg, cpuset_t *mask, int pri, int maxload,
    int prefer, int running)
{
	struct cpu_search s;
	struct cpu_search_res r;

	s.cs_prefer = prefer;
	s.cs_running = running;
	s.cs_mask = mask;
	s.cs_pri = pri;
	s.cs_load = maxload;
	cpu_search_lowest(cg, &s, &r);
	return (r.csr_cpu);
}

/*
 * Find the cpu with the highest load via the highest loaded path.
 */
static inline int
sched_highest(const struct cpu_group *cg, cpuset_t *mask, int minload,
    int mintrans)
{
	struct cpu_search s;
	struct cpu_search_res r;

	s.cs_mask = mask;
	s.cs_load = minload;
	s.cs_trans = mintrans;
	cpu_search_highest(cg, &s, &r);
	return (r.csr_cpu);
}
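
/*
 * For instance (matching the callers in this file): the balancing pass
 * below asks for the busiest cpu that has at least one transferable
 * thread via sched_highest(cg, &hmask, 1, 0), while the idle-steal
 * path in tdq_idled() requires at least steal_thresh load via
 * sched_highest(cg, &mask, steal_thresh, 1).
 */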

static void
sched_balance_group(struct cpu_group *cg)
{
	struct tdq *tdq;
	struct thread *td;
	cpuset_t hmask, lmask;
	int high, low, anylow;

	CPU_FILL(&hmask);
	for (;;) {
		high = sched_highest(cg, &hmask, 1, 0);
		/* Stop if there is no more CPU with transferable threads. */
		if (high == -1)
			break;
		CPU_CLR(high, &hmask);
		CPU_COPY(&hmask, &lmask);
		/* Stop if there is no more CPU left for low. */
		if (CPU_EMPTY(&lmask))
			break;
		tdq = TDQ_CPU(high);
		if (tdq->tdq_load == 1) {
			/*
			 * There is only one running thread.  We can't move
			 * it from here, so tell it to pick new CPU by itself.
			 */
			TDQ_LOCK(tdq);
			td = atomic_load_ptr(&tdq->tdq_curthread);
			if ((td->td_flags & TDF_IDLETD) == 0 &&
			    THREAD_CAN_MIGRATE(td)) {
				td->td_flags |= TDF_NEEDRESCHED | TDF_PICKCPU;
				if (high != curcpu)
					ipi_cpu(high, IPI_AST);
			}
			TDQ_UNLOCK(tdq);
			break;
		}
		anylow = 1;
nextlow:
		if (tdq->tdq_transferable == 0)
			continue;
		low = sched_lowest(cg, &lmask, -1, tdq->tdq_load - 1, high, 1);
		/* Stop if we looked well and found no less loaded CPU. */
		if (anylow && low == -1)
			break;
		/* Go to next high if we found no less loaded CPU. */
		if (low == -1)
			continue;
		/* Transfer thread from high to low. */
		if (sched_balance_pair(tdq, TDQ_CPU(low))) {
			/* CPU that got thread can no longer be a donor. */
			CPU_CLR(low, &hmask);
		} else {
			/*
			 * If it failed, then there are no threads on high
			 * that can run on this low.  Drop low from the low
			 * mask and look for a different one.
			 */
			CPU_CLR(low, &lmask);
			anylow = 0;
			goto nextlow;
		}
	}
}

static void
sched_balance(void)
{
	struct tdq *tdq;

	balance_ticks = max(balance_interval / 2, 1) +
	    (sched_random() % balance_interval);
	tdq = TDQ_SELF();
	TDQ_UNLOCK(tdq);
	sched_balance_group(cpu_top);
	TDQ_LOCK(tdq);
}

/*
 * Lock two thread queues using their address to maintain lock order.
 */
static void
tdq_lock_pair(struct tdq *one, struct tdq *two)
{
	if (one < two) {
		TDQ_LOCK(one);
		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
	} else {
		TDQ_LOCK(two);
		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
	}
}

/*
 * Unlock two thread queues.  Order is not important here.
 */
static void
tdq_unlock_pair(struct tdq *one, struct tdq *two)
{
	TDQ_UNLOCK(one);
	TDQ_UNLOCK(two);
}
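
/*
 * Taking the two locks in ascending address order gives a single
 * global ordering, so concurrent balancing passes cannot deadlock: if
 * one cpu pairs (a, b) while another pairs (b, a), both acquire the
 * lower-addressed queue's lock first.
 */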

/*
 * Transfer load between two imbalanced thread queues.  Returns true if a thread
 * was moved between the queues, and false otherwise.
 */
static bool
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int cpu, lowpri;
	bool ret;

	ret = false;
	tdq_lock_pair(high, low);

	/*
	 * Transfer a thread from high to low.
	 */
	if (high->tdq_transferable != 0 && high->tdq_load > low->tdq_load) {
		lowpri = tdq_move(high, low);
		if (lowpri != -1) {
			/*
			 * In case the target isn't the current CPU notify it of
			 * the new load, possibly sending an IPI to force it to
			 * reschedule.  Otherwise maybe schedule a preemption.
			 */
			cpu = TDQ_ID(low);
			if (cpu != PCPU_GET(cpuid))
				tdq_notify(low, lowpri);
			else
				sched_setpreempt(low->tdq_lowpri);
			ret = true;
		}
	}
	tdq_unlock_pair(high, low);
	return (ret);
}

/*
 * Move a thread from one thread queue to another.  Returns -1 if the source
 * queue was empty, else returns the maximum priority of all threads in
 * the destination queue prior to the addition of the new thread.  In the latter
 * case, this priority can be used to determine whether an IPI needs to be
 * delivered.
 */
static int
tdq_move(struct tdq *from, struct tdq *to)
{
	struct thread *td;
	int cpu;

	TDQ_LOCK_ASSERT(from, MA_OWNED);
	TDQ_LOCK_ASSERT(to, MA_OWNED);

	cpu = TDQ_ID(to);
	td = tdq_steal(from, cpu);
	if (td == NULL)
		return (-1);

	/*
	 * Although the run queue is locked the thread may be
	 * blocked.  We can not set the lock until it is unblocked.
	 */
	thread_lock_block_wait(td);
	sched_rem(td);
	THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(from));
	td->td_lock = TDQ_LOCKPTR(to);
	td_get_sched(td)->ts_cpu = cpu;
	return (tdq_add(to, td, SRQ_YIELDING));
}

/*
 * This tdq has idled.  Try to steal a thread from another cpu and switch
 * to it.
 */
static int
tdq_idled(struct tdq *tdq)
{
	struct cpu_group *cg, *parent;
	struct tdq *steal;
	cpuset_t mask;
	int cpu, switchcnt, goup;

	if (smp_started == 0 || steal_idle == 0 || tdq->tdq_cg == NULL)
		return (1);
	CPU_FILL(&mask);
	CPU_CLR(PCPU_GET(cpuid), &mask);
restart:
	switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
	for (cg = tdq->tdq_cg, goup = 0; ; ) {
		cpu = sched_highest(cg, &mask, steal_thresh, 1);
		/*
		 * We were assigned a thread but not preempted.  Returning
		 * 0 here will cause our caller to switch to it.
		 */
		if (tdq->tdq_load)
			return (0);

		/*
		 * We found no CPU to steal from in this group.  Escalate to
		 * the parent and repeat.  But if the parent has only two
		 * child groups we can avoid searching this group again by
		 * searching the other one specifically and then escalating
		 * two levels.
		 */
		if (cpu == -1) {
			if (goup) {
				cg = cg->cg_parent;
				goup = 0;
			}
			parent = cg->cg_parent;
			if (parent == NULL)
				return (1);
			if (parent->cg_children == 2) {
				if (cg == &parent->cg_child[0])
					cg = &parent->cg_child[1];
				else
					cg = &parent->cg_child[0];
				goup = 1;
			} else
				cg = parent;
			continue;
		}
		steal = TDQ_CPU(cpu);
		/*
		 * The data returned by sched_highest() is stale and
		 * the chosen CPU no longer has an eligible thread.
		 *
		 * Testing this ahead of tdq_lock_pair() only catches
		 * this situation about 20% of the time on an 8 core
		 * 16 thread Ryzen 7, but it still helps performance.
		 */
		if (steal->tdq_load < steal_thresh ||
		    steal->tdq_transferable == 0)
			goto restart;
		/*
		 * Try to lock both queues.  If we are assigned a thread while
		 * waiting for the lock, switch to it now instead of stealing.
		 * If we can't get the lock, then somebody likely got there
		 * first so continue searching.
		 */
		TDQ_LOCK(tdq);
		if (tdq->tdq_load > 0) {
			mi_switch(SW_VOL | SWT_IDLE);
			return (0);
		}
		if (TDQ_TRYLOCK_FLAGS(steal, MTX_DUPOK) == 0) {
			TDQ_UNLOCK(tdq);
			CPU_CLR(cpu, &mask);
			continue;
		}
		/*
		 * The data returned by sched_highest() is stale and
		 * the chosen CPU no longer has an eligible thread, or
		 * we were preempted and the CPU loading info may be out
		 * of date.  The latter is rare.  In either case restart
		 * the search.
108797e9382dSDon Lewis */ 108897e9382dSDon Lewis if (steal->tdq_load < steal_thresh || 108997e9382dSDon Lewis steal->tdq_transferable == 0 || 109097e9382dSDon Lewis switchcnt != tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt) { 10917fcf154aSJeff Roberson tdq_unlock_pair(tdq, steal); 109297e9382dSDon Lewis goto restart; 109362fa74d9SJeff Roberson } 109462fa74d9SJeff Roberson /* 109597e9382dSDon Lewis * Steal the thread and switch to it. 109662fa74d9SJeff Roberson */ 10976d3f74a1SMark Johnston if (tdq_move(steal, tdq) != -1) 109897e9382dSDon Lewis break; 109997e9382dSDon Lewis /* 110097e9382dSDon Lewis * We failed to acquire a thread even though it looked 110197e9382dSDon Lewis * like one was available. This could be due to affinity 110297e9382dSDon Lewis * restrictions or for other reasons. Loop again after 110397e9382dSDon Lewis * removing this CPU from the set. The restart logic 110497e9382dSDon Lewis * above does not restore this CPU to the set due to the 110597e9382dSDon Lewis * likelihood of failing here again. 110697e9382dSDon Lewis */ 110797e9382dSDon Lewis CPU_CLR(cpu, &mask); 110862fa74d9SJeff Roberson tdq_unlock_pair(tdq, steal); 110980f86c9fSJeff Roberson } 1110ae7a6b38SJeff Roberson TDQ_UNLOCK(steal); 1111686bcb5cSJeff Roberson mi_switch(SW_VOL | SWT_IDLE); 11127b8bfa0dSJeff Roberson return (0); 111322bf7d9aSJeff Roberson } 111422bf7d9aSJeff Roberson 1115ae7a6b38SJeff Roberson /* 1116ae7a6b38SJeff Roberson * Notify a remote cpu of new work. Sends an IPI if criteria are met. 11176d3f74a1SMark Johnston * 11186d3f74a1SMark Johnston * "lowpri" is the minimum scheduling priority among all threads on 11196d3f74a1SMark Johnston * the queue prior to the addition of the new thread. 1120ae7a6b38SJeff Roberson */ 112122bf7d9aSJeff Roberson static void 11226d3f74a1SMark Johnston tdq_notify(struct tdq *tdq, int lowpri) 112322bf7d9aSJeff Roberson { 11247b8bfa0dSJeff Roberson int cpu; 112522bf7d9aSJeff Roberson 11266d3f74a1SMark Johnston TDQ_LOCK_ASSERT(tdq, MA_OWNED); 11276d3f74a1SMark Johnston KASSERT(tdq->tdq_lowpri <= lowpri, 11286d3f74a1SMark Johnston ("tdq_notify: lowpri %d > tdq_lowpri %d", lowpri, tdq->tdq_lowpri)); 11296d3f74a1SMark Johnston 11307789ab32SMark Johnston if (tdq->tdq_owepreempt) 1131ff256d9cSJeff Roberson return; 11326d3f74a1SMark Johnston 11336d3f74a1SMark Johnston /* 11346d3f74a1SMark Johnston * Check to see if the newly added thread should preempt the one 11356d3f74a1SMark Johnston * currently running. 11366d3f74a1SMark Johnston */ 11376d3f74a1SMark Johnston if (!sched_shouldpreempt(tdq->tdq_lowpri, lowpri, 1)) 11386b2f763fSJeff Roberson return; 113979654969SAlexander Motin 114079654969SAlexander Motin /* 1141ae9e9b4fSAlexander Motin * Make sure that our caller's earlier update to tdq_load is 1142ae9e9b4fSAlexander Motin * globally visible before we read tdq_cpu_idle. Idle thread 114379654969SAlexander Motin * accesses both of them without locks, and the order is important. 114479654969SAlexander Motin */ 1145e8677f38SKonstantin Belousov atomic_thread_fence_seq_cst(); 114679654969SAlexander Motin 11471690c6c1SJeff Roberson /* 11486d3f74a1SMark Johnston * Try to figure out if we can signal the idle thread instead of sending 11496d3f74a1SMark Johnston * an IPI. This check is racy; at worst, we will deliver an IPI 11506d3f74a1SMark Johnston * unnecessarily.
11516c47aaaeSJeff Roberson */ 11526d3f74a1SMark Johnston cpu = TDQ_ID(tdq); 11536d3f74a1SMark Johnston if (TD_IS_IDLETHREAD(tdq->tdq_curthread) && 11546d3f74a1SMark Johnston (tdq->tdq_cpu_idle == 0 || cpu_idle_wakeup(cpu))) 11556c47aaaeSJeff Roberson return; 11567789ab32SMark Johnston 11577789ab32SMark Johnston /* 11587789ab32SMark Johnston * The run queues have been updated, so any switch on the remote CPU 11597789ab32SMark Johnston * will satisfy the preemption request. 11607789ab32SMark Johnston */ 11617789ab32SMark Johnston tdq->tdq_owepreempt = 1; 1162d9d8d144SJohn Baldwin ipi_cpu(cpu, IPI_PREEMPT); 116322bf7d9aSJeff Roberson } 116422bf7d9aSJeff Roberson 1165ae7a6b38SJeff Roberson /* 1166ae7a6b38SJeff Roberson * Steals load from a timeshare queue. Honors the rotating queue head 1167ae7a6b38SJeff Roberson * index. 1168ae7a6b38SJeff Roberson */ 11699727e637SJeff Roberson static struct thread * 117062fa74d9SJeff Roberson runq_steal_from(struct runq *rq, int cpu, u_char start) 1171ae7a6b38SJeff Roberson { 1172ae7a6b38SJeff Roberson struct rqbits *rqb; 1173ae7a6b38SJeff Roberson struct rqhead *rqh; 117436acfc65SAlexander Motin struct thread *td, *first; 1175ae7a6b38SJeff Roberson int bit; 1176ae7a6b38SJeff Roberson int i; 1177ae7a6b38SJeff Roberson 1178ae7a6b38SJeff Roberson rqb = &rq->rq_status; 1179ae7a6b38SJeff Roberson bit = start & (RQB_BPW -1); 118036acfc65SAlexander Motin first = NULL; 1181ae7a6b38SJeff Roberson again: 1182ae7a6b38SJeff Roberson for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) { 1183ae7a6b38SJeff Roberson if (rqb->rqb_bits[i] == 0) 1184ae7a6b38SJeff Roberson continue; 11858bc713f6SJeff Roberson if (bit == 0) 11868bc713f6SJeff Roberson bit = RQB_FFS(rqb->rqb_bits[i]); 11878bc713f6SJeff Roberson for (; bit < RQB_BPW; bit++) { 11888bc713f6SJeff Roberson if ((rqb->rqb_bits[i] & (1ul << bit)) == 0) 1189ae7a6b38SJeff Roberson continue; 11908bc713f6SJeff Roberson rqh = &rq->rq_queues[bit + (i << RQB_L2BPW)]; 11919727e637SJeff Roberson TAILQ_FOREACH(td, rqh, td_runq) { 1192bd84094aSAlexander Motin if (first) { 1193bd84094aSAlexander Motin if (THREAD_CAN_MIGRATE(td) && 11949727e637SJeff Roberson THREAD_CAN_SCHED(td, cpu)) 11959727e637SJeff Roberson return (td); 1196bd84094aSAlexander Motin } else 119736acfc65SAlexander Motin first = td; 1198ae7a6b38SJeff Roberson } 1199ae7a6b38SJeff Roberson } 12008bc713f6SJeff Roberson } 1201ae7a6b38SJeff Roberson if (start != 0) { 1202ae7a6b38SJeff Roberson start = 0; 1203ae7a6b38SJeff Roberson goto again; 1204ae7a6b38SJeff Roberson } 1205ae7a6b38SJeff Roberson 120636acfc65SAlexander Motin if (first && THREAD_CAN_MIGRATE(first) && 120736acfc65SAlexander Motin THREAD_CAN_SCHED(first, cpu)) 120836acfc65SAlexander Motin return (first); 1209ae7a6b38SJeff Roberson return (NULL); 1210ae7a6b38SJeff Roberson } 1211ae7a6b38SJeff Roberson 1212ae7a6b38SJeff Roberson /* 1213ae7a6b38SJeff Roberson * Steals load from a standard linear queue. 
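 *
 * Unlike runq_steal_from() above, the scan always begins at queue
 * index zero; tdq_steal() uses this routine for the realtime and idle
 * queues, where the rotating timeshare head (tdq_ridx) does not apply.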
1214ae7a6b38SJeff Roberson */ 12159727e637SJeff Roberson static struct thread * 121662fa74d9SJeff Roberson runq_steal(struct runq *rq, int cpu) 121722bf7d9aSJeff Roberson { 121822bf7d9aSJeff Roberson struct rqhead *rqh; 121922bf7d9aSJeff Roberson struct rqbits *rqb; 12209727e637SJeff Roberson struct thread *td; 122122bf7d9aSJeff Roberson int word; 122222bf7d9aSJeff Roberson int bit; 122322bf7d9aSJeff Roberson 122422bf7d9aSJeff Roberson rqb = &rq->rq_status; 122522bf7d9aSJeff Roberson for (word = 0; word < RQB_LEN; word++) { 122622bf7d9aSJeff Roberson if (rqb->rqb_bits[word] == 0) 122722bf7d9aSJeff Roberson continue; 122822bf7d9aSJeff Roberson for (bit = 0; bit < RQB_BPW; bit++) { 1229a2640c9bSPeter Wemm if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 123022bf7d9aSJeff Roberson continue; 123122bf7d9aSJeff Roberson rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 12329727e637SJeff Roberson TAILQ_FOREACH(td, rqh, td_runq) 12339727e637SJeff Roberson if (THREAD_CAN_MIGRATE(td) && 12349727e637SJeff Roberson THREAD_CAN_SCHED(td, cpu)) 12359727e637SJeff Roberson return (td); 123622bf7d9aSJeff Roberson } 123722bf7d9aSJeff Roberson } 123822bf7d9aSJeff Roberson return (NULL); 123922bf7d9aSJeff Roberson } 124022bf7d9aSJeff Roberson 1241ae7a6b38SJeff Roberson /* 1242ae7a6b38SJeff Roberson * Attempt to steal a thread in priority order from a thread queue. 1243ae7a6b38SJeff Roberson */ 12449727e637SJeff Roberson static struct thread * 124562fa74d9SJeff Roberson tdq_steal(struct tdq *tdq, int cpu) 124622bf7d9aSJeff Roberson { 12479727e637SJeff Roberson struct thread *td; 124822bf7d9aSJeff Roberson 1249ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 12509727e637SJeff Roberson if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL) 12519727e637SJeff Roberson return (td); 12529727e637SJeff Roberson if ((td = runq_steal_from(&tdq->tdq_timeshare, 12539727e637SJeff Roberson cpu, tdq->tdq_ridx)) != NULL) 12549727e637SJeff Roberson return (td); 125562fa74d9SJeff Roberson return (runq_steal(&tdq->tdq_idle, cpu)); 125622bf7d9aSJeff Roberson } 125780f86c9fSJeff Roberson 1258ae7a6b38SJeff Roberson /* 1259ae7a6b38SJeff Roberson * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the 12607fcf154aSJeff Roberson * current lock and returns with the assigned queue locked. 1261ae7a6b38SJeff Roberson */ 1262ae7a6b38SJeff Roberson static inline struct tdq * 12639727e637SJeff Roberson sched_setcpu(struct thread *td, int cpu, int flags) 126480f86c9fSJeff Roberson { 12659727e637SJeff Roberson 1266ae7a6b38SJeff Roberson struct tdq *tdq; 126761a74c5cSJeff Roberson struct mtx *mtx; 126880f86c9fSJeff Roberson 12699727e637SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1270ae7a6b38SJeff Roberson tdq = TDQ_CPU(cpu); 127193ccd6bfSKonstantin Belousov td_get_sched(td)->ts_cpu = cpu; 12729727e637SJeff Roberson /* 12739727e637SJeff Roberson * If the lock matches just return the queue. 12749727e637SJeff Roberson */ 127561a74c5cSJeff Roberson if (td->td_lock == TDQ_LOCKPTR(tdq)) { 127661a74c5cSJeff Roberson KASSERT((flags & SRQ_HOLD) == 0, 127761a74c5cSJeff Roberson ("sched_setcpu: Invalid lock for SRQ_HOLD")); 1278ae7a6b38SJeff Roberson return (tdq); 1279ae7a6b38SJeff Roberson } 128061a74c5cSJeff Roberson 128180f86c9fSJeff Roberson /* 1282ae7a6b38SJeff Roberson * The hard case, migration, we need to block the thread first to 1283ae7a6b38SJeff Roberson * prevent order reversals with other cpus locks. 
12847b8bfa0dSJeff Roberson */ 1285b0b9dee5SAttilio Rao spinlock_enter(); 128661a74c5cSJeff Roberson mtx = thread_lock_block(td); 128761a74c5cSJeff Roberson if ((flags & SRQ_HOLD) == 0) 128861a74c5cSJeff Roberson mtx_unlock_spin(mtx); 1289ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1290ae7a6b38SJeff Roberson thread_lock_unblock(td, TDQ_LOCKPTR(tdq)); 1291b0b9dee5SAttilio Rao spinlock_exit(); 1292ae7a6b38SJeff Roberson return (tdq); 129380f86c9fSJeff Roberson } 12942454aaf5SJeff Roberson 12958df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_intrbind, "Soft interrupt binding"); 12968df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_idle_affinity, "Picked idle cpu based on affinity"); 12978df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_affinity, "Picked cpu based on affinity"); 12988df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_lowest, "Selected lowest load"); 12998df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_local, "Migrated to current cpu"); 13008df78c41SJeff Roberson SCHED_STAT_DEFINE(pickcpu_migration, "Selection may have caused migration"); 13018df78c41SJeff Roberson 1302ae7a6b38SJeff Roberson static int 13039727e637SJeff Roberson sched_pickcpu(struct thread *td, int flags) 1304ae7a6b38SJeff Roberson { 130536acfc65SAlexander Motin struct cpu_group *cg, *ccg; 13069727e637SJeff Roberson struct td_sched *ts; 1307ae7a6b38SJeff Roberson struct tdq *tdq; 1308aefe0a8cSAlexander Motin cpuset_t *mask; 1309e745d729SAlexander Motin int cpu, pri, r, self, intr; 13107b8bfa0dSJeff Roberson 131162fa74d9SJeff Roberson self = PCPU_GET(cpuid); 131293ccd6bfSKonstantin Belousov ts = td_get_sched(td); 1313efe67753SNathan Whitehorn KASSERT(!CPU_ABSENT(ts->ts_cpu), ("sched_pickcpu: Start scheduler on " 1314efe67753SNathan Whitehorn "absent CPU %d for thread %s.", ts->ts_cpu, td->td_name)); 13157b8bfa0dSJeff Roberson if (smp_started == 0) 13167b8bfa0dSJeff Roberson return (self); 131728994a58SJeff Roberson /* 131828994a58SJeff Roberson * Don't migrate a running thread from sched_switch(). 131928994a58SJeff Roberson */ 132062fa74d9SJeff Roberson if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td)) 132162fa74d9SJeff Roberson return (ts->ts_cpu); 13227b8bfa0dSJeff Roberson /* 132362fa74d9SJeff Roberson * Prefer to run interrupt threads on the processors that generate 132462fa74d9SJeff Roberson * the interrupt. 13257b8bfa0dSJeff Roberson */ 132662fa74d9SJeff Roberson if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) && 1327c9205e35SAlexander Motin curthread->td_intr_nesting_level) { 1328c55dc51cSAlexander Motin tdq = TDQ_SELF(); 1329c55dc51cSAlexander Motin if (tdq->tdq_lowpri >= PRI_MIN_IDLE) { 1330c55dc51cSAlexander Motin SCHED_STAT_INC(pickcpu_idle_affinity); 1331c55dc51cSAlexander Motin return (self); 1332c55dc51cSAlexander Motin } 133362fa74d9SJeff Roberson ts->ts_cpu = self; 1334c9205e35SAlexander Motin intr = 1; 1335c55dc51cSAlexander Motin cg = tdq->tdq_cg; 1336c55dc51cSAlexander Motin goto llc; 1337c55dc51cSAlexander Motin } else { 1338c9205e35SAlexander Motin intr = 0; 1339c55dc51cSAlexander Motin tdq = TDQ_CPU(ts->ts_cpu); 1340c55dc51cSAlexander Motin cg = tdq->tdq_cg; 1341c55dc51cSAlexander Motin } 13427b8bfa0dSJeff Roberson /* 134336acfc65SAlexander Motin * If the thread can run on the last cpu and the affinity has not 13440127914cSEric van Gyzen * expired and it is idle, run it there. 
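 *
 * The affinity test is a recency heuristic: ts_rltick records the last
 * tick the thread ran, and SCHED_AFFINITY() passes while that is recent
 * enough (a window scaled by the "affinity" tunable) that the L2 cache
 * is likely still warm.  For an SMT group every hardware thread must be
 * idle, since a busy sibling would compete for the core's resources.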
13457b8bfa0dSJeff Roberson */ 134636acfc65SAlexander Motin if (THREAD_CAN_SCHED(td, ts->ts_cpu) && 134736acfc65SAlexander Motin tdq->tdq_lowpri >= PRI_MIN_IDLE && 134836acfc65SAlexander Motin SCHED_AFFINITY(ts, CG_SHARE_L2)) { 1349c55dc51cSAlexander Motin if (cg->cg_flags & CG_FLAG_THREAD) { 1350176dd236SAlexander Motin /* Check all SMT threads for being idle. */ 1351aefe0a8cSAlexander Motin for (cpu = cg->cg_first; cpu <= cg->cg_last; cpu++) { 1352176dd236SAlexander Motin if (CPU_ISSET(cpu, &cg->cg_mask) && 1353176dd236SAlexander Motin TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE) 135462fa74d9SJeff Roberson break; 1355aefe0a8cSAlexander Motin } 1356aefe0a8cSAlexander Motin if (cpu > cg->cg_last) { 1357176dd236SAlexander Motin SCHED_STAT_INC(pickcpu_idle_affinity); 1358176dd236SAlexander Motin return (ts->ts_cpu); 135936acfc65SAlexander Motin } 1360176dd236SAlexander Motin } else { 136136acfc65SAlexander Motin SCHED_STAT_INC(pickcpu_idle_affinity); 136236acfc65SAlexander Motin return (ts->ts_cpu); 136336acfc65SAlexander Motin } 136436acfc65SAlexander Motin } 1365c55dc51cSAlexander Motin llc: 136636acfc65SAlexander Motin /* 136736acfc65SAlexander Motin * Search for the last level cache CPU group in the tree. 1368c9205e35SAlexander Motin * Skip SMT, identical groups and caches with expired affinity. 1369c9205e35SAlexander Motin * Interrupt threads affinity is explicit and never expires. 137036acfc65SAlexander Motin */ 137136acfc65SAlexander Motin for (ccg = NULL; cg != NULL; cg = cg->cg_parent) { 137236acfc65SAlexander Motin if (cg->cg_flags & CG_FLAG_THREAD) 137336acfc65SAlexander Motin continue; 1374c9205e35SAlexander Motin if (cg->cg_children == 1 || cg->cg_count == 1) 1375c9205e35SAlexander Motin continue; 1376c9205e35SAlexander Motin if (cg->cg_level == CG_SHARE_NONE || 1377c9205e35SAlexander Motin (!intr && !SCHED_AFFINITY(ts, cg->cg_level))) 137836acfc65SAlexander Motin continue; 137936acfc65SAlexander Motin ccg = cg; 138036acfc65SAlexander Motin } 1381c9205e35SAlexander Motin /* Found LLC shared by all CPUs, so do a global search. */ 1382c9205e35SAlexander Motin if (ccg == cpu_top) 1383c9205e35SAlexander Motin ccg = NULL; 138462fa74d9SJeff Roberson cpu = -1; 1385aefe0a8cSAlexander Motin mask = &td->td_cpuset->cs_mask; 1386c9205e35SAlexander Motin pri = td->td_priority; 1387e745d729SAlexander Motin r = TD_IS_RUNNING(td); 1388c9205e35SAlexander Motin /* 1389c9205e35SAlexander Motin * Try hard to keep interrupts within found LLC. Search the LLC for 1390c9205e35SAlexander Motin * the least loaded CPU we can run now. For NUMA systems it should 1391c9205e35SAlexander Motin * be within target domain, and it also reduces scheduling overhead. 1392c9205e35SAlexander Motin */ 1393c9205e35SAlexander Motin if (ccg != NULL && intr) { 1394e745d729SAlexander Motin cpu = sched_lowest(ccg, mask, pri, INT_MAX, ts->ts_cpu, r); 1395c9205e35SAlexander Motin if (cpu >= 0) 1396c9205e35SAlexander Motin SCHED_STAT_INC(pickcpu_intrbind); 1397c9205e35SAlexander Motin } else 1398c9205e35SAlexander Motin /* Search the LLC for the least loaded idle CPU we can run now. */ 1399c9205e35SAlexander Motin if (ccg != NULL) { 1400c9205e35SAlexander Motin cpu = sched_lowest(ccg, mask, max(pri, PRI_MAX_TIMESHARE), 1401e745d729SAlexander Motin INT_MAX, ts->ts_cpu, r); 1402c9205e35SAlexander Motin if (cpu >= 0) 1403c9205e35SAlexander Motin SCHED_STAT_INC(pickcpu_affinity); 1404c9205e35SAlexander Motin } 1405c9205e35SAlexander Motin /* Search globally for the least loaded CPU we can run now. 
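 * The difference from the final fallback pass below is the priority
 * bound: this pass still asks for a CPU on which the thread could start
 * running right away, while the last resort simply takes the least
 * loaded CPU regardless.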
*/ 1406c9205e35SAlexander Motin if (cpu < 0) { 1407e745d729SAlexander Motin cpu = sched_lowest(cpu_top, mask, pri, INT_MAX, ts->ts_cpu, r); 1408c9205e35SAlexander Motin if (cpu >= 0) 1409c9205e35SAlexander Motin SCHED_STAT_INC(pickcpu_lowest); 1410c9205e35SAlexander Motin } 1411c9205e35SAlexander Motin /* Search globally for the least loaded CPU. */ 1412c9205e35SAlexander Motin if (cpu < 0) { 1413e745d729SAlexander Motin cpu = sched_lowest(cpu_top, mask, -1, INT_MAX, ts->ts_cpu, r); 1414c9205e35SAlexander Motin if (cpu >= 0) 1415c9205e35SAlexander Motin SCHED_STAT_INC(pickcpu_lowest); 1416c9205e35SAlexander Motin } 1417bb3dfc6aSAlexander Motin KASSERT(cpu >= 0, ("sched_pickcpu: Failed to find a cpu.")); 1418efe67753SNathan Whitehorn KASSERT(!CPU_ABSENT(cpu), ("sched_pickcpu: Picked absent CPU %d.", cpu)); 141962fa74d9SJeff Roberson /* 142062fa74d9SJeff Roberson * Compare the lowest loaded cpu to current cpu. 142162fa74d9SJeff Roberson */ 1422018ff686SJeff Roberson tdq = TDQ_CPU(cpu); 1423018ff686SJeff Roberson if (THREAD_CAN_SCHED(td, self) && TDQ_SELF()->tdq_lowpri > pri && 1424018ff686SJeff Roberson tdq->tdq_lowpri < PRI_MIN_IDLE && 1425018ff686SJeff Roberson TDQ_SELF()->tdq_load <= tdq->tdq_load + 1) { 14268df78c41SJeff Roberson SCHED_STAT_INC(pickcpu_local); 142762fa74d9SJeff Roberson cpu = self; 1428c9205e35SAlexander Motin } 14298df78c41SJeff Roberson if (cpu != ts->ts_cpu) 14308df78c41SJeff Roberson SCHED_STAT_INC(pickcpu_migration); 1431ae7a6b38SJeff Roberson return (cpu); 143280f86c9fSJeff Roberson } 143362fa74d9SJeff Roberson #endif 143422bf7d9aSJeff Roberson 143522bf7d9aSJeff Roberson /* 143622bf7d9aSJeff Roberson * Pick the highest priority task we have and return it. 14370c0a98b2SJeff Roberson */ 14389727e637SJeff Roberson static struct thread * 1439ad1e7d28SJulian Elischer tdq_choose(struct tdq *tdq) 14405d7ef00cSJeff Roberson { 14419727e637SJeff Roberson struct thread *td; 14425d7ef00cSJeff Roberson 1443ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 14449727e637SJeff Roberson td = runq_choose(&tdq->tdq_realtime); 14459727e637SJeff Roberson if (td != NULL) 14469727e637SJeff Roberson return (td); 14479727e637SJeff Roberson td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx); 14489727e637SJeff Roberson if (td != NULL) { 144912d56c0fSJohn Baldwin KASSERT(td->td_priority >= PRI_MIN_BATCH, 1450e7d50326SJeff Roberson ("tdq_choose: Invalid priority on timeshare queue %d", 14519727e637SJeff Roberson td->td_priority)); 14529727e637SJeff Roberson return (td); 145315dc847eSJeff Roberson } 14549727e637SJeff Roberson td = runq_choose(&tdq->tdq_idle); 14559727e637SJeff Roberson if (td != NULL) { 14569727e637SJeff Roberson KASSERT(td->td_priority >= PRI_MIN_IDLE, 1457e7d50326SJeff Roberson ("tdq_choose: Invalid priority on idle queue %d", 14589727e637SJeff Roberson td->td_priority)); 14599727e637SJeff Roberson return (td); 1460e7d50326SJeff Roberson } 1461e7d50326SJeff Roberson 1462e7d50326SJeff Roberson return (NULL); 1463245f3abfSJeff Roberson } 14640a016a05SJeff Roberson 1465ae7a6b38SJeff Roberson /* 1466ae7a6b38SJeff Roberson * Initialize a thread queue. 
1467ae7a6b38SJeff Roberson */ 14680a016a05SJeff Roberson static void 1469018ff686SJeff Roberson tdq_setup(struct tdq *tdq, int id) 14700a016a05SJeff Roberson { 1471ae7a6b38SJeff Roberson 1472c47f202bSJeff Roberson if (bootverbose) 1473018ff686SJeff Roberson printf("ULE: setup cpu %d\n", id); 1474e7d50326SJeff Roberson runq_init(&tdq->tdq_realtime); 1475e7d50326SJeff Roberson runq_init(&tdq->tdq_timeshare); 1476d2ad694cSJeff Roberson runq_init(&tdq->tdq_idle); 1477018ff686SJeff Roberson tdq->tdq_id = id; 147862fa74d9SJeff Roberson snprintf(tdq->tdq_name, sizeof(tdq->tdq_name), 147962fa74d9SJeff Roberson "sched lock %d", (int)TDQ_ID(tdq)); 148061a74c5cSJeff Roberson mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock", MTX_SPIN); 14818f51ad55SJeff Roberson #ifdef KTR 14828f51ad55SJeff Roberson snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname), 14838f51ad55SJeff Roberson "CPU %d load", (int)TDQ_ID(tdq)); 14848f51ad55SJeff Roberson #endif 14850a016a05SJeff Roberson } 14860a016a05SJeff Roberson 1487c47f202bSJeff Roberson #ifdef SMP 1488c47f202bSJeff Roberson static void 1489c47f202bSJeff Roberson sched_setup_smp(void) 1490c47f202bSJeff Roberson { 1491c47f202bSJeff Roberson struct tdq *tdq; 1492c47f202bSJeff Roberson int i; 1493c47f202bSJeff Roberson 149462fa74d9SJeff Roberson cpu_top = smp_topo(); 14953aa6d94eSJohn Baldwin CPU_FOREACH(i) { 1496018ff686SJeff Roberson tdq = DPCPU_ID_PTR(i, tdq); 1497018ff686SJeff Roberson tdq_setup(tdq, i); 149862fa74d9SJeff Roberson tdq->tdq_cg = smp_topo_find(cpu_top, i); 149962fa74d9SJeff Roberson if (tdq->tdq_cg == NULL) 150062fa74d9SJeff Roberson panic("Can't find cpu group for %d\n", i); 1501ca34553bSAlexander Motin DPCPU_ID_SET(i, randomval, i * 69069 + 5); 1502c47f202bSJeff Roberson } 1503018ff686SJeff Roberson PCPU_SET(sched, DPCPU_PTR(tdq)); 150462fa74d9SJeff Roberson balance_tdq = TDQ_SELF(); 1505c47f202bSJeff Roberson } 1506c47f202bSJeff Roberson #endif 1507c47f202bSJeff Roberson 1508ae7a6b38SJeff Roberson /* 1509ae7a6b38SJeff Roberson * Setup the thread queues and initialize the topology based on MD 1510ae7a6b38SJeff Roberson * information. 1511ae7a6b38SJeff Roberson */ 151235e6168fSJeff Roberson static void 151335e6168fSJeff Roberson sched_setup(void *dummy) 151435e6168fSJeff Roberson { 1515ae7a6b38SJeff Roberson struct tdq *tdq; 1516c47f202bSJeff Roberson 15170ec896fdSJeff Roberson #ifdef SMP 1518c47f202bSJeff Roberson sched_setup_smp(); 1519749d01b0SJeff Roberson #else 1520018ff686SJeff Roberson tdq_setup(TDQ_SELF(), 0); 1521356500a3SJeff Roberson #endif 1522018ff686SJeff Roberson tdq = TDQ_SELF(); 1523ae7a6b38SJeff Roberson 1524ae7a6b38SJeff Roberson /* Add thread0's load since it's running. */ 1525ae7a6b38SJeff Roberson TDQ_LOCK(tdq); 1526e1504695SJeff Roberson thread0.td_lock = TDQ_LOCKPTR(tdq); 15279727e637SJeff Roberson tdq_load_add(tdq, &thread0); 15286d3f74a1SMark Johnston tdq->tdq_curthread = &thread0; 152962fa74d9SJeff Roberson tdq->tdq_lowpri = thread0.td_priority; 1530ae7a6b38SJeff Roberson TDQ_UNLOCK(tdq); 153135e6168fSJeff Roberson } 153235e6168fSJeff Roberson 1533ae7a6b38SJeff Roberson /* 1534579895dfSAlexander Motin * This routine determines time constants after stathz and hz are setup. 1535ae7a6b38SJeff Roberson */ 1536a1d4fe69SDavid Xu /* ARGSUSED */ 1537a1d4fe69SDavid Xu static void 1538a1d4fe69SDavid Xu sched_initticks(void *dummy) 1539a1d4fe69SDavid Xu { 1540ae7a6b38SJeff Roberson int incr; 1541ae7a6b38SJeff Roberson 1542a1d4fe69SDavid Xu realstathz = stathz ? 
stathz : hz; 15435e5c3873SJeff Roberson sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR; 15445e5c3873SJeff Roberson sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR; 154537f4e025SAlexander Motin hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / 154637f4e025SAlexander Motin realstathz); 1547a1d4fe69SDavid Xu 1548a1d4fe69SDavid Xu /* 1549e7d50326SJeff Roberson * tickincr is shifted out by 10 to avoid rounding errors due to 15503f872f85SJeff Roberson * hz not being evenly divisible by stathz on all platforms. 1551e7d50326SJeff Roberson */ 1552ae7a6b38SJeff Roberson incr = (hz << SCHED_TICK_SHIFT) / realstathz; 1553e7d50326SJeff Roberson /* 1554e7d50326SJeff Roberson * This does not work for values of stathz that are more than 1555e7d50326SJeff Roberson * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen. 1556a1d4fe69SDavid Xu */ 1557ae7a6b38SJeff Roberson if (incr == 0) 1558ae7a6b38SJeff Roberson incr = 1; 1559ae7a6b38SJeff Roberson tickincr = incr; 15607b8bfa0dSJeff Roberson #ifdef SMP 15619862717aSJeff Roberson /* 15627fcf154aSJeff Roberson * Set the default balance interval now that we know 15637fcf154aSJeff Roberson * what realstathz is. 15647fcf154aSJeff Roberson */ 15657fcf154aSJeff Roberson balance_interval = realstathz; 1566290d9060SDon Lewis balance_ticks = balance_interval; 15677b8bfa0dSJeff Roberson affinity = SCHED_AFFINITY_DEFAULT; 15687b8bfa0dSJeff Roberson #endif 1569b3f40a41SAlexander Motin if (sched_idlespinthresh < 0) 15702c27cb3aSAlexander Motin sched_idlespinthresh = 2 * max(10000, 6 * hz) / realstathz; 1571a1d4fe69SDavid Xu } 1572a1d4fe69SDavid Xu 157335e6168fSJeff Roberson /* 1574ae7a6b38SJeff Roberson * This is the core of the interactivity algorithm. Determines a score based 1575ae7a6b38SJeff Roberson * on past behavior. It is the ratio of sleep time to run time scaled to 1576ae7a6b38SJeff Roberson * a [0, 100] integer. This is the voluntary sleep time of a process, which 1577ae7a6b38SJeff Roberson * differs from the cpu usage because it does not account for time spent 1578ae7a6b38SJeff Roberson * waiting on a run-queue. Would be prettier if we had floating point. 157957031f79SGeorge V. Neville-Neil * 158057031f79SGeorge V. Neville-Neil * When a thread's sleep time is greater than its run time the 158157031f79SGeorge V. Neville-Neil * calculation is: 158257031f79SGeorge V. Neville-Neil * 158357031f79SGeorge V. Neville-Neil * scaling factor 158457031f79SGeorge V. Neville-Neil * interactivity score = --------------------- 158557031f79SGeorge V. Neville-Neil * sleep time / run time 158657031f79SGeorge V. Neville-Neil * 158757031f79SGeorge V. Neville-Neil * 158857031f79SGeorge V. Neville-Neil * When a thread's run time is greater than its sleep time the 158957031f79SGeorge V. Neville-Neil * calculation is: 159057031f79SGeorge V. Neville-Neil * 159157031f79SGeorge V. Neville-Neil * scaling factor 159243521b46Swiklam * interactivity score = 2 * scaling factor - --------------------- 159357031f79SGeorge V. Neville-Neil * run time / sleep time 1594ae7a6b38SJeff Roberson */ 1595ae7a6b38SJeff Roberson static int 1596ae7a6b38SJeff Roberson sched_interact_score(struct thread *td) 1597ae7a6b38SJeff Roberson { 1598ae7a6b38SJeff Roberson struct td_sched *ts; 1599ae7a6b38SJeff Roberson int div; 1600ae7a6b38SJeff Roberson 160193ccd6bfSKonstantin Belousov ts = td_get_sched(td); 1602ae7a6b38SJeff Roberson /* 1603ae7a6b38SJeff Roberson * The score is only needed if this is likely to be an interactive 1604ae7a6b38SJeff Roberson * task. 
Don't go through the expense of computing it if there's 1605ae7a6b38SJeff Roberson * no chance. 1606ae7a6b38SJeff Roberson */ 1607ae7a6b38SJeff Roberson if (sched_interact <= SCHED_INTERACT_HALF && 1608ae7a6b38SJeff Roberson ts->ts_runtime >= ts->ts_slptime) 1609ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1610ae7a6b38SJeff Roberson 1611ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1612ae7a6b38SJeff Roberson div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF); 1613ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF + 1614ae7a6b38SJeff Roberson (SCHED_INTERACT_HALF - (ts->ts_slptime / div))); 1615ae7a6b38SJeff Roberson } 1616ae7a6b38SJeff Roberson if (ts->ts_slptime > ts->ts_runtime) { 1617ae7a6b38SJeff Roberson div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF); 1618ae7a6b38SJeff Roberson return (ts->ts_runtime / div); 1619ae7a6b38SJeff Roberson } 1620ae7a6b38SJeff Roberson /* runtime == slptime */ 1621ae7a6b38SJeff Roberson if (ts->ts_runtime) 1622ae7a6b38SJeff Roberson return (SCHED_INTERACT_HALF); 1623ae7a6b38SJeff Roberson 1624ae7a6b38SJeff Roberson /* 1625ae7a6b38SJeff Roberson * This can happen if slptime and runtime are 0. 1626ae7a6b38SJeff Roberson */ 1627ae7a6b38SJeff Roberson return (0); 1628ae7a6b38SJeff Roberson 1629ae7a6b38SJeff Roberson } 1630ae7a6b38SJeff Roberson 1631ae7a6b38SJeff Roberson /* 163235e6168fSJeff Roberson * Scale the scheduling priority according to the "interactivity" of this 163335e6168fSJeff Roberson * process. 163435e6168fSJeff Roberson */ 163515dc847eSJeff Roberson static void 16368460a577SJohn Birrell sched_priority(struct thread *td) 163735e6168fSJeff Roberson { 16381c119e17SAlexander Motin u_int pri, score; 163935e6168fSJeff Roberson 1640c9a8cba4SJohn Baldwin if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE) 164115dc847eSJeff Roberson return; 1642e7d50326SJeff Roberson /* 1643e7d50326SJeff Roberson * If the score is interactive we place the thread in the realtime 1644e7d50326SJeff Roberson * queue with a priority that is less than kernel and interrupt 1645e7d50326SJeff Roberson * priorities. These threads are not subject to nice restrictions. 1646e7d50326SJeff Roberson * 1647ae7a6b38SJeff Roberson * Scores greater than this are placed on the normal timeshare queue 1648e7d50326SJeff Roberson * where the priority is partially decided by the most recent cpu 1649e7d50326SJeff Roberson * utilization and the rest is decided by nice value. 1650a5423ea3SJeff Roberson * 1651a5423ea3SJeff Roberson * The nice value of the process has a linear effect on the calculated 1652a5423ea3SJeff Roberson * score. Negative nice values make it easier for a thread to be 1653a5423ea3SJeff Roberson * considered interactive. 
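 *
 * As a rough worked example, assuming the default interactivity
 * threshold of 30: a thread that slept for 150 ticks and ran for 50
 * scores about 50 / (150 / 50) = 16, below the threshold, and receives
 * a priority in the interactive range.  A thread that ran at least as
 * much as it slept scores 50 or more (sched_interact_score()
 * short-circuits that case) and falls through to the batch calculation,
 * where recent CPU use and the nice value pick the timeshare priority.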
1654e7d50326SJeff Roberson */ 1655a0f15352SJohn Baldwin score = imax(0, sched_interact_score(td) + td->td_proc->p_nice); 1656e7d50326SJeff Roberson if (score < sched_interact) { 165712d56c0fSJohn Baldwin pri = PRI_MIN_INTERACT; 16581c119e17SAlexander Motin pri += (PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) * score / 16591c119e17SAlexander Motin sched_interact; 166012d56c0fSJohn Baldwin KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT, 16611c119e17SAlexander Motin ("sched_priority: invalid interactive priority %u score %u", 16629a93305aSJeff Roberson pri, score)); 1663e7d50326SJeff Roberson } else { 1664e7d50326SJeff Roberson pri = SCHED_PRI_MIN; 166593ccd6bfSKonstantin Belousov if (td_get_sched(td)->ts_ticks) 166693ccd6bfSKonstantin Belousov pri += min(SCHED_PRI_TICKS(td_get_sched(td)), 16675457fa23SJohn Baldwin SCHED_PRI_RANGE - 1); 1668e7d50326SJeff Roberson pri += SCHED_PRI_NICE(td->td_proc->p_nice); 166912d56c0fSJohn Baldwin KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH, 16701c119e17SAlexander Motin ("sched_priority: invalid priority %u: nice %d, " 1671ae7a6b38SJeff Roberson "ticks %d ftick %d ltick %d tick pri %d", 167293ccd6bfSKonstantin Belousov pri, td->td_proc->p_nice, td_get_sched(td)->ts_ticks, 167393ccd6bfSKonstantin Belousov td_get_sched(td)->ts_ftick, td_get_sched(td)->ts_ltick, 167493ccd6bfSKonstantin Belousov SCHED_PRI_TICKS(td_get_sched(td)))); 1675e7d50326SJeff Roberson } 16768460a577SJohn Birrell sched_user_prio(td, pri); 167735e6168fSJeff Roberson 167815dc847eSJeff Roberson return; 167935e6168fSJeff Roberson } 168035e6168fSJeff Roberson 168135e6168fSJeff Roberson /* 1682d322132cSJeff Roberson * This routine enforces a maximum limit on the amount of scheduling history 1683ae7a6b38SJeff Roberson * kept. It is called after either the slptime or runtime is adjusted. This 1684ae7a6b38SJeff Roberson * function is ugly due to integer math. 1685d322132cSJeff Roberson */ 16864b60e324SJeff Roberson static void 16878460a577SJohn Birrell sched_interact_update(struct thread *td) 16884b60e324SJeff Roberson { 1689155b6ca1SJeff Roberson struct td_sched *ts; 16909a93305aSJeff Roberson u_int sum; 16913f741ca1SJeff Roberson 169293ccd6bfSKonstantin Belousov ts = td_get_sched(td); 1693ae7a6b38SJeff Roberson sum = ts->ts_runtime + ts->ts_slptime; 1694d322132cSJeff Roberson if (sum < SCHED_SLP_RUN_MAX) 1695d322132cSJeff Roberson return; 1696d322132cSJeff Roberson /* 1697155b6ca1SJeff Roberson * This only happens from two places: 1698155b6ca1SJeff Roberson * 1) We have added an unusual amount of run time from fork_exit. 1699155b6ca1SJeff Roberson * 2) We have added an unusual amount of sleep time from sched_sleep(). 1700155b6ca1SJeff Roberson */ 1701155b6ca1SJeff Roberson if (sum > SCHED_SLP_RUN_MAX * 2) { 1702ae7a6b38SJeff Roberson if (ts->ts_runtime > ts->ts_slptime) { 1703ae7a6b38SJeff Roberson ts->ts_runtime = SCHED_SLP_RUN_MAX; 1704ae7a6b38SJeff Roberson ts->ts_slptime = 1; 1705155b6ca1SJeff Roberson } else { 1706ae7a6b38SJeff Roberson ts->ts_slptime = SCHED_SLP_RUN_MAX; 1707ae7a6b38SJeff Roberson ts->ts_runtime = 1; 1708155b6ca1SJeff Roberson } 1709155b6ca1SJeff Roberson return; 1710155b6ca1SJeff Roberson } 1711155b6ca1SJeff Roberson /* 1712d322132cSJeff Roberson * If we have exceeded by more than 1/5th then the algorithm below 1713d322132cSJeff Roberson * will not bring us back into range. 
Dividing by two here forces 17142454aaf5SJeff Roberson * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1715d322132cSJeff Roberson */ 171637a35e4aSJeff Roberson if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { 1717ae7a6b38SJeff Roberson ts->ts_runtime /= 2; 1718ae7a6b38SJeff Roberson ts->ts_slptime /= 2; 1719d322132cSJeff Roberson return; 1720d322132cSJeff Roberson } 1721ae7a6b38SJeff Roberson ts->ts_runtime = (ts->ts_runtime / 5) * 4; 1722ae7a6b38SJeff Roberson ts->ts_slptime = (ts->ts_slptime / 5) * 4; 1723d322132cSJeff Roberson } 1724d322132cSJeff Roberson 1725ae7a6b38SJeff Roberson /* 1726ae7a6b38SJeff Roberson * Scale back the interactivity history when a child thread is created. The 1727ae7a6b38SJeff Roberson * history is inherited from the parent but the thread may behave totally 1728ae7a6b38SJeff Roberson * differently. For example, a shell spawning a compiler process. We want 1729ae7a6b38SJeff Roberson * to learn that the compiler is behaving badly very quickly. 1730ae7a6b38SJeff Roberson */ 1731d322132cSJeff Roberson static void 17328460a577SJohn Birrell sched_interact_fork(struct thread *td) 1733d322132cSJeff Roberson { 173493ccd6bfSKonstantin Belousov struct td_sched *ts; 1735d322132cSJeff Roberson int ratio; 1736d322132cSJeff Roberson int sum; 1737d322132cSJeff Roberson 173893ccd6bfSKonstantin Belousov ts = td_get_sched(td); 173993ccd6bfSKonstantin Belousov sum = ts->ts_runtime + ts->ts_slptime; 1740d322132cSJeff Roberson if (sum > SCHED_SLP_RUN_FORK) { 1741d322132cSJeff Roberson ratio = sum / SCHED_SLP_RUN_FORK; 174293ccd6bfSKonstantin Belousov ts->ts_runtime /= ratio; 174393ccd6bfSKonstantin Belousov ts->ts_slptime /= ratio; 17444b60e324SJeff Roberson } 17454b60e324SJeff Roberson } 17464b60e324SJeff Roberson 174715dc847eSJeff Roberson /* 1748ae7a6b38SJeff Roberson * Called from proc0_init() to setup the scheduler fields. 1749ed062c8dSJulian Elischer */ 1750ed062c8dSJulian Elischer void 1751ed062c8dSJulian Elischer schedinit(void) 1752ed062c8dSJulian Elischer { 175393ccd6bfSKonstantin Belousov struct td_sched *ts0; 1754e7d50326SJeff Roberson 1755ed062c8dSJulian Elischer /* 175693ccd6bfSKonstantin Belousov * Set up the scheduler specific parts of thread0. 1757ed062c8dSJulian Elischer */ 175893ccd6bfSKonstantin Belousov ts0 = td_get_sched(&thread0); 175993ccd6bfSKonstantin Belousov ts0->ts_ltick = ticks; 176093ccd6bfSKonstantin Belousov ts0->ts_ftick = ticks; 176193ccd6bfSKonstantin Belousov ts0->ts_slice = 0; 17621408b84aSHans Petter Selasky ts0->ts_cpu = curcpu; /* set valid CPU number */ 1763ed062c8dSJulian Elischer } 1764ed062c8dSJulian Elischer 1765ed062c8dSJulian Elischer /* 1766589aed00SKyle Evans * schedinit_ap() is needed prior to calling sched_throw(NULL) to ensure that 1767589aed00SKyle Evans * the pcpu requirements are met for any calls in the period between curthread 1768589aed00SKyle Evans * initialization and sched_throw(). One can safely add threads to the queue 1769589aed00SKyle Evans * before sched_throw(), for instance, as long as the thread lock is setup 1770589aed00SKyle Evans * correctly. 1771589aed00SKyle Evans * 1772589aed00SKyle Evans * TDQ_SELF() relies on the below sched pcpu setting; it may be used only 1773589aed00SKyle Evans * after schedinit_ap(). 
1774589aed00SKyle Evans */ 1775589aed00SKyle Evans void 1776589aed00SKyle Evans schedinit_ap(void) 1777589aed00SKyle Evans { 1778589aed00SKyle Evans 1779589aed00SKyle Evans #ifdef SMP 1780589aed00SKyle Evans PCPU_SET(sched, DPCPU_PTR(tdq)); 1781589aed00SKyle Evans #endif 1782589aed00SKyle Evans PCPU_GET(idlethread)->td_lock = TDQ_LOCKPTR(TDQ_SELF()); 1783589aed00SKyle Evans } 1784589aed00SKyle Evans 1785589aed00SKyle Evans /* 178615dc847eSJeff Roberson * This is only somewhat accurate since given many processes of the same 178715dc847eSJeff Roberson * priority they will switch when their slices run out, which will be 1788e7d50326SJeff Roberson * at most sched_slice stathz ticks. 178915dc847eSJeff Roberson */ 179035e6168fSJeff Roberson int 179135e6168fSJeff Roberson sched_rr_interval(void) 179235e6168fSJeff Roberson { 1793e7d50326SJeff Roberson 1794579895dfSAlexander Motin /* Convert sched_slice from stathz to hz. */ 179537f4e025SAlexander Motin return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz)); 179635e6168fSJeff Roberson } 179735e6168fSJeff Roberson 1798ae7a6b38SJeff Roberson /* 1799ae7a6b38SJeff Roberson * Update the percent cpu tracking information when it is requested or 1800ae7a6b38SJeff Roberson * the total history exceeds the maximum. We keep a sliding history of 1801ae7a6b38SJeff Roberson * tick counts that slowly decays. This is less precise than the 4BSD 1802ae7a6b38SJeff Roberson * mechanism since it happens with less regular and frequent events. 1803ae7a6b38SJeff Roberson */ 180422bf7d9aSJeff Roberson static void 18057295465eSAlexander Motin sched_pctcpu_update(struct td_sched *ts, int run) 180635e6168fSJeff Roberson { 18077295465eSAlexander Motin int t = ticks; 1808e7d50326SJeff Roberson 180978133024SMark Johnston /* 181078133024SMark Johnston * The signed difference may be negative if the thread hasn't run for 181178133024SMark Johnston * over half of the ticks rollover period. 181278133024SMark Johnston */ 181378133024SMark Johnston if ((u_int)(t - ts->ts_ltick) >= SCHED_TICK_TARG) { 1814ad1e7d28SJulian Elischer ts->ts_ticks = 0; 18157295465eSAlexander Motin ts->ts_ftick = t - SCHED_TICK_TARG; 18167295465eSAlexander Motin } else if (t - ts->ts_ftick >= SCHED_TICK_MAX) { 18177295465eSAlexander Motin ts->ts_ticks = (ts->ts_ticks / (ts->ts_ltick - ts->ts_ftick)) * 18187295465eSAlexander Motin (ts->ts_ltick - (t - SCHED_TICK_TARG)); 18197295465eSAlexander Motin ts->ts_ftick = t - SCHED_TICK_TARG; 18207295465eSAlexander Motin } 18217295465eSAlexander Motin if (run) 18227295465eSAlexander Motin ts->ts_ticks += (t - ts->ts_ltick) << SCHED_TICK_SHIFT; 18237295465eSAlexander Motin ts->ts_ltick = t; 182435e6168fSJeff Roberson } 182535e6168fSJeff Roberson 1826ae7a6b38SJeff Roberson /* 1827ae7a6b38SJeff Roberson * Adjust the priority of a thread. Move it to the appropriate run-queue 1828ae7a6b38SJeff Roberson * if necessary. This is the back-end for several priority related 1829ae7a6b38SJeff Roberson * functions. 
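 *
 * Three cases are handled below: a thread sitting on a run queue that
 * is being given a more important (numerically lower) priority is
 * removed and re-added so it lands on the queue matching the new
 * priority; a currently running thread only needs the per-CPU
 * tdq_lowpri bookkeeping refreshed; any other thread simply has
 * td_priority rewritten.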
1830ae7a6b38SJeff Roberson */ 1831e7d50326SJeff Roberson static void 1832f5c157d9SJohn Baldwin sched_thread_priority(struct thread *td, u_char prio) 183335e6168fSJeff Roberson { 183473daf66fSJeff Roberson struct tdq *tdq; 183573daf66fSJeff Roberson int oldpri; 183635e6168fSJeff Roberson 18378f51ad55SJeff Roberson KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio", 18388f51ad55SJeff Roberson "prio:%d", td->td_priority, "new prio:%d", prio, 18398f51ad55SJeff Roberson KTR_ATTR_LINKED, sched_tdname(curthread)); 1840d9fae5abSAndriy Gapon SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio); 1841e87fc7cfSAndriy Gapon if (td != curthread && prio < td->td_priority) { 18428f51ad55SJeff Roberson KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread), 18438f51ad55SJeff Roberson "lend prio", "prio:%d", td->td_priority, "new prio:%d", 18448f51ad55SJeff Roberson prio, KTR_ATTR_LINKED, sched_tdname(td)); 1845d9fae5abSAndriy Gapon SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio, 1846b3e9e682SRyan Stone curthread); 18478f51ad55SJeff Roberson } 18487b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1849f5c157d9SJohn Baldwin if (td->td_priority == prio) 1850f5c157d9SJohn Baldwin return; 18513f741ca1SJeff Roberson /* 18523f741ca1SJeff Roberson * If the priority has been elevated due to priority 18533f741ca1SJeff Roberson * propagation, we may have to move ourselves to a new 1854e7d50326SJeff Roberson * queue. This could be optimized to not re-add in some 1855e7d50326SJeff Roberson * cases. 1856f2b74cbfSJeff Roberson */ 18576d55b3ecSJeff Roberson if (TD_ON_RUNQ(td) && prio < td->td_priority) { 1858e7d50326SJeff Roberson sched_rem(td); 1859e7d50326SJeff Roberson td->td_priority = prio; 186061a74c5cSJeff Roberson sched_add(td, SRQ_BORROWING | SRQ_HOLDTD); 186173daf66fSJeff Roberson return; 186273daf66fSJeff Roberson } 18636d55b3ecSJeff Roberson /* 18646d55b3ecSJeff Roberson * If the thread is currently running we may have to adjust the lowpri 18656d55b3ecSJeff Roberson * information so other cpus are aware of our current priority. 18666d55b3ecSJeff Roberson */ 18676d55b3ecSJeff Roberson if (TD_IS_RUNNING(td)) { 18684aec1984SJohn Baldwin tdq = TDQ_CPU(td_get_sched(td)->ts_cpu); 186962fa74d9SJeff Roberson oldpri = td->td_priority; 18703f741ca1SJeff Roberson td->td_priority = prio; 187162fa74d9SJeff Roberson if (prio < tdq->tdq_lowpri) 187262fa74d9SJeff Roberson tdq->tdq_lowpri = prio; 187362fa74d9SJeff Roberson else if (tdq->tdq_lowpri == oldpri) 187462fa74d9SJeff Roberson tdq_setlowpri(tdq, td); 18756d55b3ecSJeff Roberson return; 187673daf66fSJeff Roberson } 18776d55b3ecSJeff Roberson td->td_priority = prio; 1878ae7a6b38SJeff Roberson } 187935e6168fSJeff Roberson 1880f5c157d9SJohn Baldwin /* 1881f5c157d9SJohn Baldwin * Update a thread's priority when it is lent another thread's 1882f5c157d9SJohn Baldwin * priority. 1883f5c157d9SJohn Baldwin */ 1884f5c157d9SJohn Baldwin void 1885f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio) 1886f5c157d9SJohn Baldwin { 1887f5c157d9SJohn Baldwin 1888f5c157d9SJohn Baldwin td->td_flags |= TDF_BORROWING; 1889f5c157d9SJohn Baldwin sched_thread_priority(td, prio); 1890f5c157d9SJohn Baldwin } 1891f5c157d9SJohn Baldwin 1892f5c157d9SJohn Baldwin /* 1893f5c157d9SJohn Baldwin * Restore a thread's priority when priority propagation is 1894f5c157d9SJohn Baldwin * over. The prio argument is the minimum priority the thread 1895f5c157d9SJohn Baldwin * needs to have to satisfy other possible priority lending 1896f5c157d9SJohn Baldwin * requests. 
If the thread's regular priority is less 1897f5c157d9SJohn Baldwin * important than prio, the thread will keep a priority boost 1898f5c157d9SJohn Baldwin * of prio. 1899f5c157d9SJohn Baldwin */ 1900f5c157d9SJohn Baldwin void 1901f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio) 1902f5c157d9SJohn Baldwin { 1903f5c157d9SJohn Baldwin u_char base_pri; 1904f5c157d9SJohn Baldwin 1905f5c157d9SJohn Baldwin if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1906f5c157d9SJohn Baldwin td->td_base_pri <= PRI_MAX_TIMESHARE) 19078460a577SJohn Birrell base_pri = td->td_user_pri; 1908f5c157d9SJohn Baldwin else 1909f5c157d9SJohn Baldwin base_pri = td->td_base_pri; 1910f5c157d9SJohn Baldwin if (prio >= base_pri) { 1911f5c157d9SJohn Baldwin td->td_flags &= ~TDF_BORROWING; 1912f5c157d9SJohn Baldwin sched_thread_priority(td, base_pri); 1913f5c157d9SJohn Baldwin } else 1914f5c157d9SJohn Baldwin sched_lend_prio(td, prio); 1915f5c157d9SJohn Baldwin } 1916f5c157d9SJohn Baldwin 1917ae7a6b38SJeff Roberson /* 1918ae7a6b38SJeff Roberson * Standard entry for setting the priority to an absolute value. 1919ae7a6b38SJeff Roberson */ 1920f5c157d9SJohn Baldwin void 1921f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio) 1922f5c157d9SJohn Baldwin { 1923f5c157d9SJohn Baldwin u_char oldprio; 1924f5c157d9SJohn Baldwin 1925f5c157d9SJohn Baldwin /* First, update the base priority. */ 1926f5c157d9SJohn Baldwin td->td_base_pri = prio; 1927f5c157d9SJohn Baldwin 1928f5c157d9SJohn Baldwin /* 192950aaa791SJohn Baldwin * If the thread is borrowing another thread's priority, don't 1930f5c157d9SJohn Baldwin * ever lower the priority. 1931f5c157d9SJohn Baldwin */ 1932f5c157d9SJohn Baldwin if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1933f5c157d9SJohn Baldwin return; 1934f5c157d9SJohn Baldwin 1935f5c157d9SJohn Baldwin /* Change the real priority. */ 1936f5c157d9SJohn Baldwin oldprio = td->td_priority; 1937f5c157d9SJohn Baldwin sched_thread_priority(td, prio); 1938f5c157d9SJohn Baldwin 1939f5c157d9SJohn Baldwin /* 1940f5c157d9SJohn Baldwin * If the thread is on a turnstile, then let the turnstile update 1941f5c157d9SJohn Baldwin * its state. 1942f5c157d9SJohn Baldwin */ 1943f5c157d9SJohn Baldwin if (TD_ON_LOCK(td) && oldprio != prio) 1944f5c157d9SJohn Baldwin turnstile_adjust(td, oldprio); 1945f5c157d9SJohn Baldwin } 1946f5c157d9SJohn Baldwin 1947ae7a6b38SJeff Roberson /* 1948ae7a6b38SJeff Roberson * Set the base user priority, does not effect current running priority. 
1949ae7a6b38SJeff Roberson */ 195035e6168fSJeff Roberson void 19518460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio) 19523db720fdSDavid Xu { 19533db720fdSDavid Xu 19548460a577SJohn Birrell td->td_base_user_pri = prio; 1955acbe332aSDavid Xu if (td->td_lend_user_pri <= prio) 1956fc6c30f6SJulian Elischer return; 19578460a577SJohn Birrell td->td_user_pri = prio; 19583db720fdSDavid Xu } 19593db720fdSDavid Xu 19603db720fdSDavid Xu void 19613db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio) 19623db720fdSDavid Xu { 19633db720fdSDavid Xu 1964435806d3SDavid Xu THREAD_LOCK_ASSERT(td, MA_OWNED); 1965acbe332aSDavid Xu td->td_lend_user_pri = prio; 1966c8e368a9SDavid Xu td->td_user_pri = min(prio, td->td_base_user_pri); 1967c8e368a9SDavid Xu if (td->td_priority > td->td_user_pri) 1968c8e368a9SDavid Xu sched_prio(td, td->td_user_pri); 1969c8e368a9SDavid Xu else if (td->td_priority != td->td_user_pri) 1970c8e368a9SDavid Xu td->td_flags |= TDF_NEEDRESCHED; 1971435806d3SDavid Xu } 19723db720fdSDavid Xu 1973ac97da9aSMateusz Guzik /* 1974ac97da9aSMateusz Guzik * Like the above but first check if there is anything to do. 1975ac97da9aSMateusz Guzik */ 1976ac97da9aSMateusz Guzik void 1977ac97da9aSMateusz Guzik sched_lend_user_prio_cond(struct thread *td, u_char prio) 1978ac97da9aSMateusz Guzik { 1979ac97da9aSMateusz Guzik 1980ac97da9aSMateusz Guzik if (td->td_lend_user_pri != prio) 1981ac97da9aSMateusz Guzik goto lend; 1982ac97da9aSMateusz Guzik if (td->td_user_pri != min(prio, td->td_base_user_pri)) 1983ac97da9aSMateusz Guzik goto lend; 1984b77594bbSMateusz Guzik if (td->td_priority != td->td_user_pri) 1985ac97da9aSMateusz Guzik goto lend; 1986ac97da9aSMateusz Guzik return; 1987ac97da9aSMateusz Guzik 1988ac97da9aSMateusz Guzik lend: 1989ac97da9aSMateusz Guzik thread_lock(td); 1990ac97da9aSMateusz Guzik sched_lend_user_prio(td, prio); 1991ac97da9aSMateusz Guzik thread_unlock(td); 1992ac97da9aSMateusz Guzik } 1993ac97da9aSMateusz Guzik 19944c8a8cfcSKonstantin Belousov #ifdef SMP 1995ae7a6b38SJeff Roberson /* 199697e9382dSDon Lewis * This tdq is about to idle. Try to steal a thread from another CPU before 199797e9382dSDon Lewis * choosing the idle thread. 199897e9382dSDon Lewis */ 199997e9382dSDon Lewis static void 200097e9382dSDon Lewis tdq_trysteal(struct tdq *tdq) 200197e9382dSDon Lewis { 20022668bb2aSAlexander Motin struct cpu_group *cg, *parent; 200397e9382dSDon Lewis struct tdq *steal; 200497e9382dSDon Lewis cpuset_t mask; 20052668bb2aSAlexander Motin int cpu, i, goup; 200697e9382dSDon Lewis 200708063e9fSAlexander Motin if (smp_started == 0 || steal_idle == 0 || trysteal_limit == 0 || 200808063e9fSAlexander Motin tdq->tdq_cg == NULL) 200997e9382dSDon Lewis return; 201097e9382dSDon Lewis CPU_FILL(&mask); 201197e9382dSDon Lewis CPU_CLR(PCPU_GET(cpuid), &mask); 201297e9382dSDon Lewis /* We don't want to be preempted while we're iterating. */ 201397e9382dSDon Lewis spinlock_enter(); 201497e9382dSDon Lewis TDQ_UNLOCK(tdq); 20152668bb2aSAlexander Motin for (i = 1, cg = tdq->tdq_cg, goup = 0; ; ) { 201608063e9fSAlexander Motin cpu = sched_highest(cg, &mask, steal_thresh, 1); 201797e9382dSDon Lewis /* 201897e9382dSDon Lewis * If a thread was added while interrupts were disabled don't 201997e9382dSDon Lewis * steal one here. 
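 * (Our queue lock was dropped above, so another CPU may have placed
 * work on this queue in the meantime.)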
202097e9382dSDon Lewis */ 202197e9382dSDon Lewis if (tdq->tdq_load > 0) { 202297e9382dSDon Lewis TDQ_LOCK(tdq); 202397e9382dSDon Lewis break; 202497e9382dSDon Lewis } 20252668bb2aSAlexander Motin 20262668bb2aSAlexander Motin /* 20272668bb2aSAlexander Motin * We found no CPU to steal from in this group. Escalate to 20282668bb2aSAlexander Motin * the parent and repeat. But if parent has only two child 20292668bb2aSAlexander Motin * groups we can avoid searching this group again by searching 20302668bb2aSAlexander Motin * the other one specifically and then escalating two levels. 20312668bb2aSAlexander Motin */ 203297e9382dSDon Lewis if (cpu == -1) { 20332668bb2aSAlexander Motin if (goup) { 203497e9382dSDon Lewis cg = cg->cg_parent; 20352668bb2aSAlexander Motin goup = 0; 20362668bb2aSAlexander Motin } 20372668bb2aSAlexander Motin if (++i > trysteal_limit) { 203897e9382dSDon Lewis TDQ_LOCK(tdq); 203997e9382dSDon Lewis break; 204097e9382dSDon Lewis } 20412668bb2aSAlexander Motin parent = cg->cg_parent; 20422668bb2aSAlexander Motin if (parent == NULL) { 20432668bb2aSAlexander Motin TDQ_LOCK(tdq); 20442668bb2aSAlexander Motin break; 20452668bb2aSAlexander Motin } 20462668bb2aSAlexander Motin if (parent->cg_children == 2) { 20472668bb2aSAlexander Motin if (cg == &parent->cg_child[0]) 20482668bb2aSAlexander Motin cg = &parent->cg_child[1]; 20492668bb2aSAlexander Motin else 20502668bb2aSAlexander Motin cg = &parent->cg_child[0]; 20512668bb2aSAlexander Motin goup = 1; 20522668bb2aSAlexander Motin } else 20532668bb2aSAlexander Motin cg = parent; 205497e9382dSDon Lewis continue; 205597e9382dSDon Lewis } 205697e9382dSDon Lewis steal = TDQ_CPU(cpu); 205797e9382dSDon Lewis /* 205897e9382dSDon Lewis * The data returned by sched_highest() is stale and 205997e9382dSDon Lewis * the chosen CPU no longer has an eligible thread. 206015b5c347SGordon Bergling * At this point unconditionally exit the loop to bound 206108063e9fSAlexander Motin * the time spent in the critical section. 206297e9382dSDon Lewis */ 206397e9382dSDon Lewis if (steal->tdq_load < steal_thresh || 206497e9382dSDon Lewis steal->tdq_transferable == 0) 206597e9382dSDon Lewis continue; 206697e9382dSDon Lewis /* 20678bb173fbSAlexander Motin * Try to lock both queues. If we are assigned a thread while 20688bb173fbSAlexander Motin * waiting for the lock, switch to it now instead of stealing. 20698bb173fbSAlexander Motin * If we can't get the lock, then somebody likely got there 207008063e9fSAlexander Motin * first. 207197e9382dSDon Lewis */ 20728bb173fbSAlexander Motin TDQ_LOCK(tdq); 20738bb173fbSAlexander Motin if (tdq->tdq_load > 0) 207497e9382dSDon Lewis break; 20758bb173fbSAlexander Motin if (TDQ_TRYLOCK_FLAGS(steal, MTX_DUPOK) == 0) 20768bb173fbSAlexander Motin break; 207797e9382dSDon Lewis /* 207897e9382dSDon Lewis * The data returned by sched_highest() is stale and 207997e9382dSDon Lewis * the chosen CPU no longer has an eligible thread. 208097e9382dSDon Lewis */ 208197e9382dSDon Lewis if (steal->tdq_load < steal_thresh || 208297e9382dSDon Lewis steal->tdq_transferable == 0) { 208397e9382dSDon Lewis TDQ_UNLOCK(steal); 208497e9382dSDon Lewis break; 208597e9382dSDon Lewis } 208697e9382dSDon Lewis /* 208797e9382dSDon Lewis * If we fail to acquire one due to affinity restrictions, 208897e9382dSDon Lewis * bail out and let the idle thread do a more complete search 208997e9382dSDon Lewis * outside of a critical section.
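 * Unlike tdq_idled(), which clears the failing CPU from the mask and
 * keeps looking, this path simply gives up so that the time spent with
 * preemption disabled stays bounded.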
209097e9382dSDon Lewis */ 20916d3f74a1SMark Johnston if (tdq_move(steal, tdq) == -1) { 209297e9382dSDon Lewis TDQ_UNLOCK(steal); 209397e9382dSDon Lewis break; 209497e9382dSDon Lewis } 209597e9382dSDon Lewis TDQ_UNLOCK(steal); 209697e9382dSDon Lewis break; 209797e9382dSDon Lewis } 209897e9382dSDon Lewis spinlock_exit(); 209997e9382dSDon Lewis } 21004c8a8cfcSKonstantin Belousov #endif 210197e9382dSDon Lewis 210297e9382dSDon Lewis /* 2103c47f202bSJeff Roberson * Handle migration from sched_switch(). This happens only for 2104c47f202bSJeff Roberson * cpu binding. 2105c47f202bSJeff Roberson */ 2106c47f202bSJeff Roberson static struct mtx * 2107c47f202bSJeff Roberson sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags) 2108c47f202bSJeff Roberson { 2109c47f202bSJeff Roberson struct tdq *tdn; 21106d3f74a1SMark Johnston int lowpri; 2111c47f202bSJeff Roberson 2112686bcb5cSJeff Roberson KASSERT(THREAD_CAN_MIGRATE(td) || 2113686bcb5cSJeff Roberson (td_get_sched(td)->ts_flags & TSF_BOUND) != 0, 2114686bcb5cSJeff Roberson ("Thread %p shouldn't migrate", td)); 2115efe67753SNathan Whitehorn KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu), ("sched_switch_migrate: " 2116efe67753SNathan Whitehorn "thread %s queued on absent CPU %d.", td->td_name, 2117efe67753SNathan Whitehorn td_get_sched(td)->ts_cpu)); 211893ccd6bfSKonstantin Belousov tdn = TDQ_CPU(td_get_sched(td)->ts_cpu); 2119c47f202bSJeff Roberson #ifdef SMP 21209727e637SJeff Roberson tdq_load_rem(tdq, td); 2121c47f202bSJeff Roberson /* 2122686bcb5cSJeff Roberson * Do the lock dance required to avoid LOR. We have an 2123686bcb5cSJeff Roberson * extra spinlock nesting from sched_switch() which will 2124686bcb5cSJeff Roberson * prevent preemption while we're holding neither run-queue lock. 2125c47f202bSJeff Roberson */ 2126686bcb5cSJeff Roberson TDQ_UNLOCK(tdq); 2127686bcb5cSJeff Roberson TDQ_LOCK(tdn); 21286d3f74a1SMark Johnston lowpri = tdq_add(tdn, td, flags); 21296d3f74a1SMark Johnston tdq_notify(tdn, lowpri); 2130c47f202bSJeff Roberson TDQ_UNLOCK(tdn); 2131686bcb5cSJeff Roberson TDQ_LOCK(tdq); 2132c47f202bSJeff Roberson #endif 2133c47f202bSJeff Roberson return (TDQ_LOCKPTR(tdn)); 2134c47f202bSJeff Roberson } 2135c47f202bSJeff Roberson 2136c47f202bSJeff Roberson /* 213761a74c5cSJeff Roberson * thread_lock_unblock() that does not assume td_lock is blocked. 2138ae7a6b38SJeff Roberson */ 2139ae7a6b38SJeff Roberson static inline void 2140ae7a6b38SJeff Roberson thread_unblock_switch(struct thread *td, struct mtx *mtx) 2141ae7a6b38SJeff Roberson { 2142ae7a6b38SJeff Roberson atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock, 2143ae7a6b38SJeff Roberson (uintptr_t)mtx); 2144ae7a6b38SJeff Roberson } 2145ae7a6b38SJeff Roberson 2146ae7a6b38SJeff Roberson /* 2147ae7a6b38SJeff Roberson * Switch threads. This function has to handle threads coming in while 2148ae7a6b38SJeff Roberson * blocked for some reason, running, or idle. It also must deal with 2149ae7a6b38SJeff Roberson * migrating a thread from one queue to another as running threads may 2150ae7a6b38SJeff Roberson * be assigned elsewhere via binding. 
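 *
 * The broad structure below: charge the outgoing thread for its CPU
 * time, block its thread lock, then either requeue it locally, hand it
 * to another CPU's queue via sched_switch_migrate(), or drop its load
 * if it is going to sleep.  Finally choosethread() picks the next
 * thread and cpu_switch() is called if it differs from the outgoing
 * one.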
2151ae7a6b38SJeff Roberson */ 21523db720fdSDavid Xu void 2153686bcb5cSJeff Roberson sched_switch(struct thread *td, int flags) 215435e6168fSJeff Roberson { 2155686bcb5cSJeff Roberson struct thread *newtd; 2156c02bbb43SJeff Roberson struct tdq *tdq; 2157ad1e7d28SJulian Elischer struct td_sched *ts; 2158ae7a6b38SJeff Roberson struct mtx *mtx; 2159c47f202bSJeff Roberson int srqflag; 21608db16699SAlexander Motin int cpuid, preempted; 21618db16699SAlexander Motin #ifdef SMP 21628db16699SAlexander Motin int pickcpu; 21638db16699SAlexander Motin #endif 216435e6168fSJeff Roberson 21657b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 216635e6168fSJeff Roberson 2167ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 2168018ff686SJeff Roberson tdq = TDQ_SELF(); 216993ccd6bfSKonstantin Belousov ts = td_get_sched(td); 21707295465eSAlexander Motin sched_pctcpu_update(ts, 1); 21718db16699SAlexander Motin #ifdef SMP 2172e745d729SAlexander Motin pickcpu = (td->td_flags & TDF_PICKCPU) != 0; 2173e745d729SAlexander Motin if (pickcpu) 2174e745d729SAlexander Motin ts->ts_rltick = ticks - affinity * MAX_CACHE_LEVELS; 2175e745d729SAlexander Motin else 2176ae7a6b38SJeff Roberson ts->ts_rltick = ticks; 21778db16699SAlexander Motin #endif 2178060563ecSJulian Elischer td->td_lastcpu = td->td_oncpu; 2179ad9dadc4SAndriy Gapon preempted = (td->td_flags & TDF_SLICEEND) == 0 && 2180ad9dadc4SAndriy Gapon (flags & SW_PREEMPT) != 0; 2181e745d729SAlexander Motin td->td_flags &= ~(TDF_NEEDRESCHED | TDF_PICKCPU | TDF_SLICEEND); 218277918643SStephan Uphoff td->td_owepreempt = 0; 21837789ab32SMark Johnston tdq->tdq_owepreempt = 0; 21842c27cb3aSAlexander Motin if (!TD_IS_IDLETHREAD(td)) 21851690c6c1SJeff Roberson tdq->tdq_switchcnt++; 21867789ab32SMark Johnston 2187b11fdad0SJeff Roberson /* 2188686bcb5cSJeff Roberson * Always block the thread lock so we can drop the tdq lock early. 2189b11fdad0SJeff Roberson */ 2190686bcb5cSJeff Roberson mtx = thread_lock_block(td); 2191686bcb5cSJeff Roberson spinlock_enter(); 2192486a9414SJulian Elischer if (TD_IS_IDLETHREAD(td)) { 2193686bcb5cSJeff Roberson MPASS(mtx == TDQ_LOCKPTR(tdq)); 2194bf0acc27SJohn Baldwin TD_SET_CAN_RUN(td); 21957b20fb19SJeff Roberson } else if (TD_IS_RUNNING(td)) { 2196686bcb5cSJeff Roberson MPASS(mtx == TDQ_LOCKPTR(tdq)); 21973d7f4117SAlexander Motin srqflag = preempted ? 2198598b368dSJeff Roberson SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 2199c47f202bSJeff Roberson SRQ_OURSELF|SRQ_YIELDING; 2200ba4932b5SMatthew D Fleming #ifdef SMP 2201e745d729SAlexander Motin if (THREAD_CAN_MIGRATE(td) && (!THREAD_CAN_SCHED(td, ts->ts_cpu) 2202e745d729SAlexander Motin || pickcpu)) 22030f7a0ebdSMatthew D Fleming ts->ts_cpu = sched_pickcpu(td, 0); 2204ba4932b5SMatthew D Fleming #endif 2205c47f202bSJeff Roberson if (ts->ts_cpu == cpuid) 22069727e637SJeff Roberson tdq_runq_add(tdq, td, srqflag); 2207686bcb5cSJeff Roberson else 2208c47f202bSJeff Roberson mtx = sched_switch_migrate(tdq, td, srqflag); 2209ae7a6b38SJeff Roberson } else { 2210ae7a6b38SJeff Roberson /* This thread must be going to sleep. 
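 * Its lock typically points at a sleepqueue or turnstile chain lock by
 * now, so switch to the run-queue lock before the load accounting
 * below.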
*/ 221161a74c5cSJeff Roberson if (mtx != TDQ_LOCKPTR(tdq)) { 221261a74c5cSJeff Roberson mtx_unlock_spin(mtx); 221361a74c5cSJeff Roberson TDQ_LOCK(tdq); 221461a74c5cSJeff Roberson } 22159727e637SJeff Roberson tdq_load_rem(tdq, td); 22164c8a8cfcSKonstantin Belousov #ifdef SMP 221797e9382dSDon Lewis if (tdq->tdq_load == 0) 221897e9382dSDon Lewis tdq_trysteal(tdq); 22194c8a8cfcSKonstantin Belousov #endif 2220ae7a6b38SJeff Roberson } 2221afa0a46cSAndriy Gapon 2222afa0a46cSAndriy Gapon #if (KTR_COMPILE & KTR_SCHED) != 0 2223afa0a46cSAndriy Gapon if (TD_IS_IDLETHREAD(td)) 2224afa0a46cSAndriy Gapon KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle", 2225afa0a46cSAndriy Gapon "prio:%d", td->td_priority); 2226afa0a46cSAndriy Gapon else 2227afa0a46cSAndriy Gapon KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td), 2228afa0a46cSAndriy Gapon "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg, 2229afa0a46cSAndriy Gapon "lockname:\"%s\"", td->td_lockname); 2230afa0a46cSAndriy Gapon #endif 2231afa0a46cSAndriy Gapon 2232ae7a6b38SJeff Roberson /* 2233ae7a6b38SJeff Roberson * We enter here with the thread blocked and assigned to the 2234ae7a6b38SJeff Roberson * appropriate cpu run-queue or sleep-queue and with the current 2235ae7a6b38SJeff Roberson * thread-queue locked. 2236ae7a6b38SJeff Roberson */ 2237ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED); 22386d3f74a1SMark Johnston MPASS(td == tdq->tdq_curthread); 22392454aaf5SJeff Roberson newtd = choosethread(); 2240686bcb5cSJeff Roberson sched_pctcpu_update(td_get_sched(newtd), 0); 2241686bcb5cSJeff Roberson TDQ_UNLOCK(tdq); 2242686bcb5cSJeff Roberson 2243ae7a6b38SJeff Roberson /* 2244ae7a6b38SJeff Roberson * Call the MD code to switch contexts if necessary. 2245ae7a6b38SJeff Roberson */ 2246ebccf1e3SJoseph Koshy if (td != newtd) { 2247ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 2248ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 2249ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 2250ebccf1e3SJoseph Koshy #endif 2251d9fae5abSAndriy Gapon SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc); 22526f5f25e5SJohn Birrell 22536f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS 22546f5f25e5SJohn Birrell /* 22556f5f25e5SJohn Birrell * If DTrace has set the active vtime enum to anything 22566f5f25e5SJohn Birrell * other than INACTIVE (0), then it should have set the 22576f5f25e5SJohn Birrell * function to call. 
22586f5f25e5SJohn Birrell */ 22596f5f25e5SJohn Birrell if (dtrace_vtime_active) 22606f5f25e5SJohn Birrell (*dtrace_vtime_switch_func)(newtd); 22616f5f25e5SJohn Birrell #endif 2262686bcb5cSJeff Roberson td->td_oncpu = NOCPU; 2263ae7a6b38SJeff Roberson cpu_switch(td, newtd, mtx); 2264a89c2c8cSMark Johnston cpuid = td->td_oncpu = PCPU_GET(cpuid); 2265b3e9e682SRyan Stone 2266d9fae5abSAndriy Gapon SDT_PROBE0(sched, , , on__cpu); 2267ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 2268ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 2269ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 2270ebccf1e3SJoseph Koshy #endif 2271b3e9e682SRyan Stone } else { 2272ae7a6b38SJeff Roberson thread_unblock_switch(td, mtx); 2273d9fae5abSAndriy Gapon SDT_PROBE0(sched, , , remain__cpu); 2274b3e9e682SRyan Stone } 2275686bcb5cSJeff Roberson KASSERT(curthread->td_md.md_spinlock_count == 1, 2276686bcb5cSJeff Roberson ("invalid count %d", curthread->td_md.md_spinlock_count)); 2277afa0a46cSAndriy Gapon 2278afa0a46cSAndriy Gapon KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running", 2279afa0a46cSAndriy Gapon "prio:%d", td->td_priority); 228035e6168fSJeff Roberson } 228135e6168fSJeff Roberson 2282ae7a6b38SJeff Roberson /* 2283ae7a6b38SJeff Roberson * Adjust thread priorities as a result of a nice request. 2284ae7a6b38SJeff Roberson */ 228535e6168fSJeff Roberson void 2286fa885116SJulian Elischer sched_nice(struct proc *p, int nice) 228735e6168fSJeff Roberson { 228835e6168fSJeff Roberson struct thread *td; 228935e6168fSJeff Roberson 2290fa885116SJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 2291e7d50326SJeff Roberson 2292fa885116SJulian Elischer p->p_nice = nice; 22938460a577SJohn Birrell FOREACH_THREAD_IN_PROC(p, td) { 22947b20fb19SJeff Roberson thread_lock(td); 22958460a577SJohn Birrell sched_priority(td); 2296e7d50326SJeff Roberson sched_prio(td, td->td_base_user_pri); 22977b20fb19SJeff Roberson thread_unlock(td); 229835e6168fSJeff Roberson } 2299fa885116SJulian Elischer } 230035e6168fSJeff Roberson 2301ae7a6b38SJeff Roberson /* 2302ae7a6b38SJeff Roberson * Record the sleep time for the interactivity scorer. 2303ae7a6b38SJeff Roberson */ 230435e6168fSJeff Roberson void 2305c5aa6b58SJeff Roberson sched_sleep(struct thread *td, int prio) 230635e6168fSJeff Roberson { 2307e7d50326SJeff Roberson 23087b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 230935e6168fSJeff Roberson 231054b0e65fSJeff Roberson td->td_slptick = ticks; 231117c4c356SKonstantin Belousov if (TD_IS_SUSPENDED(td) || prio >= PSOCK) 2312c5aa6b58SJeff Roberson td->td_flags |= TDF_CANSWAP; 23132dc29adbSJohn Baldwin if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE) 23142dc29adbSJohn Baldwin return; 23150502fe2eSJeff Roberson if (static_boost == 1 && prio) 2316c5aa6b58SJeff Roberson sched_prio(td, prio); 23170502fe2eSJeff Roberson else if (static_boost && td->td_priority > static_boost) 23180502fe2eSJeff Roberson sched_prio(td, static_boost); 231935e6168fSJeff Roberson } 232035e6168fSJeff Roberson 2321ae7a6b38SJeff Roberson /* 2322ae7a6b38SJeff Roberson * Schedule a thread to resume execution and record how long it voluntarily 2323ae7a6b38SJeff Roberson * slept. We also update the pctcpu, interactivity, and priority. 232461a74c5cSJeff Roberson * 232561a74c5cSJeff Roberson * Requires the thread lock on entry, drops on exit. 
2326ae7a6b38SJeff Roberson */ 232735e6168fSJeff Roberson void 232861a74c5cSJeff Roberson sched_wakeup(struct thread *td, int srqflags) 232935e6168fSJeff Roberson { 233014618990SJeff Roberson struct td_sched *ts; 2331ae7a6b38SJeff Roberson int slptick; 2332e7d50326SJeff Roberson 23337b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 233493ccd6bfSKonstantin Belousov ts = td_get_sched(td); 2335c5aa6b58SJeff Roberson td->td_flags &= ~TDF_CANSWAP; 233661a74c5cSJeff Roberson 233735e6168fSJeff Roberson /* 2338e7d50326SJeff Roberson * If we slept for more than a tick update our interactivity and 2339e7d50326SJeff Roberson * priority. 234035e6168fSJeff Roberson */ 234154b0e65fSJeff Roberson slptick = td->td_slptick; 234254b0e65fSJeff Roberson td->td_slptick = 0; 2343ae7a6b38SJeff Roberson if (slptick && slptick != ticks) { 23447295465eSAlexander Motin ts->ts_slptime += (ticks - slptick) << SCHED_TICK_SHIFT; 23458460a577SJohn Birrell sched_interact_update(td); 23467295465eSAlexander Motin sched_pctcpu_update(ts, 0); 2347f1e8dc4aSJeff Roberson } 23485e5c3873SJeff Roberson /* 23495e5c3873SJeff Roberson * Reset the slice value since we slept and advanced the round-robin. 23505e5c3873SJeff Roberson */ 23515e5c3873SJeff Roberson ts->ts_slice = 0; 235261a74c5cSJeff Roberson sched_add(td, SRQ_BORING | srqflags); 235335e6168fSJeff Roberson } 235435e6168fSJeff Roberson 235535e6168fSJeff Roberson /* 235635e6168fSJeff Roberson * Penalize the parent for creating a new child and initialize the child's 235735e6168fSJeff Roberson * priority. 235835e6168fSJeff Roberson */ 235935e6168fSJeff Roberson void 23608460a577SJohn Birrell sched_fork(struct thread *td, struct thread *child) 236115dc847eSJeff Roberson { 23627b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 236393ccd6bfSKonstantin Belousov sched_pctcpu_update(td_get_sched(td), 1); 2364ad1e7d28SJulian Elischer sched_fork_thread(td, child); 2365e7d50326SJeff Roberson /* 2366e7d50326SJeff Roberson * Penalize the parent and child for forking. 2367e7d50326SJeff Roberson */ 2368e7d50326SJeff Roberson sched_interact_fork(child); 2369e7d50326SJeff Roberson sched_priority(child); 237093ccd6bfSKonstantin Belousov td_get_sched(td)->ts_runtime += tickincr; 2371e7d50326SJeff Roberson sched_interact_update(td); 2372e7d50326SJeff Roberson sched_priority(td); 2373ad1e7d28SJulian Elischer } 2374ad1e7d28SJulian Elischer 2375ae7a6b38SJeff Roberson /* 2376ae7a6b38SJeff Roberson * Fork a new thread, may be within the same process. 2377ae7a6b38SJeff Roberson */ 2378ad1e7d28SJulian Elischer void 2379ad1e7d28SJulian Elischer sched_fork_thread(struct thread *td, struct thread *child) 2380ad1e7d28SJulian Elischer { 2381ad1e7d28SJulian Elischer struct td_sched *ts; 2382ad1e7d28SJulian Elischer struct td_sched *ts2; 23835e5c3873SJeff Roberson struct tdq *tdq; 23848460a577SJohn Birrell 23855e5c3873SJeff Roberson tdq = TDQ_SELF(); 23868b16c208SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2387e7d50326SJeff Roberson /* 2388e7d50326SJeff Roberson * Initialize child. 
2389e7d50326SJeff Roberson */ 239093ccd6bfSKonstantin Belousov ts = td_get_sched(td); 239193ccd6bfSKonstantin Belousov ts2 = td_get_sched(child); 239292de34dfSJohn Baldwin child->td_oncpu = NOCPU; 239392de34dfSJohn Baldwin child->td_lastcpu = NOCPU; 23945e5c3873SJeff Roberson child->td_lock = TDQ_LOCKPTR(tdq); 23958b16c208SJeff Roberson child->td_cpuset = cpuset_ref(td->td_cpuset); 23963f289c3fSJeff Roberson child->td_domain.dr_policy = td->td_cpuset->cs_domain; 2397ad1e7d28SJulian Elischer ts2->ts_cpu = ts->ts_cpu; 23988b16c208SJeff Roberson ts2->ts_flags = 0; 2399e7d50326SJeff Roberson /* 240022d19207SJohn Baldwin * Grab our parents cpu estimation information. 2401e7d50326SJeff Roberson */ 2402ad1e7d28SJulian Elischer ts2->ts_ticks = ts->ts_ticks; 2403ad1e7d28SJulian Elischer ts2->ts_ltick = ts->ts_ltick; 2404ad1e7d28SJulian Elischer ts2->ts_ftick = ts->ts_ftick; 240522d19207SJohn Baldwin /* 240622d19207SJohn Baldwin * Do not inherit any borrowed priority from the parent. 240722d19207SJohn Baldwin */ 240822d19207SJohn Baldwin child->td_priority = child->td_base_pri; 2409e7d50326SJeff Roberson /* 2410e7d50326SJeff Roberson * And update interactivity score. 2411e7d50326SJeff Roberson */ 2412ae7a6b38SJeff Roberson ts2->ts_slptime = ts->ts_slptime; 2413ae7a6b38SJeff Roberson ts2->ts_runtime = ts->ts_runtime; 24145e5c3873SJeff Roberson /* Attempt to quickly learn interactivity. */ 24155e5c3873SJeff Roberson ts2->ts_slice = tdq_slice(tdq) - sched_slice_min; 24168f51ad55SJeff Roberson #ifdef KTR 24178f51ad55SJeff Roberson bzero(ts2->ts_name, sizeof(ts2->ts_name)); 24188f51ad55SJeff Roberson #endif 241915dc847eSJeff Roberson } 242015dc847eSJeff Roberson 2421ae7a6b38SJeff Roberson /* 2422ae7a6b38SJeff Roberson * Adjust the priority class of a thread. 2423ae7a6b38SJeff Roberson */ 242415dc847eSJeff Roberson void 24258460a577SJohn Birrell sched_class(struct thread *td, int class) 242615dc847eSJeff Roberson { 242715dc847eSJeff Roberson 24287b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 24298460a577SJohn Birrell if (td->td_pri_class == class) 243015dc847eSJeff Roberson return; 24318460a577SJohn Birrell td->td_pri_class = class; 243235e6168fSJeff Roberson } 243335e6168fSJeff Roberson 243435e6168fSJeff Roberson /* 243535e6168fSJeff Roberson * Return some of the child's priority and interactivity to the parent. 243635e6168fSJeff Roberson */ 243735e6168fSJeff Roberson void 2438fc6c30f6SJulian Elischer sched_exit(struct proc *p, struct thread *child) 243935e6168fSJeff Roberson { 2440e7d50326SJeff Roberson struct thread *td; 2441141ad61cSJeff Roberson 24428f51ad55SJeff Roberson KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit", 2443cd39bb09SXin LI "prio:%d", child->td_priority); 2444374ae2a3SJeff Roberson PROC_LOCK_ASSERT(p, MA_OWNED); 2445e7d50326SJeff Roberson td = FIRST_THREAD_IN_PROC(p); 2446e7d50326SJeff Roberson sched_exit_thread(td, child); 2447ad1e7d28SJulian Elischer } 2448ad1e7d28SJulian Elischer 2449ae7a6b38SJeff Roberson /* 2450ae7a6b38SJeff Roberson * Penalize another thread for the time spent on this one. This helps to 2451ae7a6b38SJeff Roberson * worsen the priority and interactivity of processes which schedule batch 2452ae7a6b38SJeff Roberson * jobs such as make. This has little effect on the make process itself but 2453ae7a6b38SJeff Roberson * causes new processes spawned by it to receive worse scores immediately. 
2454ae7a6b38SJeff Roberson */ 2455ad1e7d28SJulian Elischer void 2456fc6c30f6SJulian Elischer sched_exit_thread(struct thread *td, struct thread *child) 2457ad1e7d28SJulian Elischer { 2458fc6c30f6SJulian Elischer 24598f51ad55SJeff Roberson KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit", 2460cd39bb09SXin LI "prio:%d", child->td_priority); 2461e7d50326SJeff Roberson /* 2462e7d50326SJeff Roberson * Give the child's runtime to the parent without returning the 2463e7d50326SJeff Roberson * sleep time as a penalty to the parent. This causes shells that 2464e7d50326SJeff Roberson * launch expensive things to mark their children as expensive. 2465e7d50326SJeff Roberson */ 24667b20fb19SJeff Roberson thread_lock(td); 246793ccd6bfSKonstantin Belousov td_get_sched(td)->ts_runtime += td_get_sched(child)->ts_runtime; 2468fc6c30f6SJulian Elischer sched_interact_update(td); 2469e7d50326SJeff Roberson sched_priority(td); 24707b20fb19SJeff Roberson thread_unlock(td); 2471ad1e7d28SJulian Elischer } 2472ad1e7d28SJulian Elischer 2473ff256d9cSJeff Roberson void 2474ff256d9cSJeff Roberson sched_preempt(struct thread *td) 2475ff256d9cSJeff Roberson { 2476ff256d9cSJeff Roberson struct tdq *tdq; 2477686bcb5cSJeff Roberson int flags; 2478ff256d9cSJeff Roberson 2479b3e9e682SRyan Stone SDT_PROBE2(sched, , , surrender, td, td->td_proc); 2480b3e9e682SRyan Stone 2481ff256d9cSJeff Roberson thread_lock(td); 2482ff256d9cSJeff Roberson tdq = TDQ_SELF(); 2483ff256d9cSJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2484ff256d9cSJeff Roberson if (td->td_priority > tdq->tdq_lowpri) { 2485686bcb5cSJeff Roberson if (td->td_critnest == 1) { 24868df78c41SJeff Roberson flags = SW_INVOL | SW_PREEMPT; 2487686bcb5cSJeff Roberson flags |= TD_IS_IDLETHREAD(td) ? SWT_REMOTEWAKEIDLE : 2488686bcb5cSJeff Roberson SWT_REMOTEPREEMPT; 2489686bcb5cSJeff Roberson mi_switch(flags); 2490686bcb5cSJeff Roberson /* Switch dropped thread lock. */ 2491686bcb5cSJeff Roberson return; 2492686bcb5cSJeff Roberson } 2493ff256d9cSJeff Roberson td->td_owepreempt = 1; 24947789ab32SMark Johnston } else { 24957789ab32SMark Johnston tdq->tdq_owepreempt = 0; 2496ff256d9cSJeff Roberson } 2497ff256d9cSJeff Roberson thread_unlock(td); 2498ff256d9cSJeff Roberson } 2499ff256d9cSJeff Roberson 2500ae7a6b38SJeff Roberson /* 2501ae7a6b38SJeff Roberson * Fix priorities on return to user-space. Priorities may be elevated due 2502ae7a6b38SJeff Roberson * to static priorities in msleep() or similar. 2503ae7a6b38SJeff Roberson */ 2504ad1e7d28SJulian Elischer void 250528240885SMateusz Guzik sched_userret_slowpath(struct thread *td) 2506ad1e7d28SJulian Elischer { 250728240885SMateusz Guzik 25087b20fb19SJeff Roberson thread_lock(td); 2509ad1e7d28SJulian Elischer td->td_priority = td->td_user_pri; 2510ad1e7d28SJulian Elischer td->td_base_pri = td->td_user_pri; 251162fa74d9SJeff Roberson tdq_setlowpri(TDQ_SELF(), td); 25127b20fb19SJeff Roberson thread_unlock(td); 2513ad1e7d28SJulian Elischer } 251435e6168fSJeff Roberson 2515ae7a6b38SJeff Roberson /* 2516ae7a6b38SJeff Roberson * Handle a stathz tick. This is really only relevant for timeshare 2517ae7a6b38SJeff Roberson * threads. 
2518ae7a6b38SJeff Roberson */ 251935e6168fSJeff Roberson void 2520c3cccf95SJeff Roberson sched_clock(struct thread *td, int cnt) 252135e6168fSJeff Roberson { 2522ad1e7d28SJulian Elischer struct tdq *tdq; 2523ad1e7d28SJulian Elischer struct td_sched *ts; 252435e6168fSJeff Roberson 2525ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 25263f872f85SJeff Roberson tdq = TDQ_SELF(); 25277fcf154aSJeff Roberson #ifdef SMP 25287fcf154aSJeff Roberson /* 25297fcf154aSJeff Roberson * We run the long term load balancer infrequently on the first cpu. 25307fcf154aSJeff Roberson */ 2531c3cccf95SJeff Roberson if (balance_tdq == tdq && smp_started != 0 && rebalance != 0 && 2532c3cccf95SJeff Roberson balance_ticks != 0) { 2533c3cccf95SJeff Roberson balance_ticks -= cnt; 2534c3cccf95SJeff Roberson if (balance_ticks <= 0) 25357fcf154aSJeff Roberson sched_balance(); 25367fcf154aSJeff Roberson } 25377fcf154aSJeff Roberson #endif 25383f872f85SJeff Roberson /* 25391690c6c1SJeff Roberson * Save the old switch count so we have a record of the last ticks 25401690c6c1SJeff Roberson * activity. Initialize the new switch count based on our load. 25411690c6c1SJeff Roberson * If there is some activity seed it to reflect that. 25421690c6c1SJeff Roberson */ 25431690c6c1SJeff Roberson tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt; 25446c47aaaeSJeff Roberson tdq->tdq_switchcnt = tdq->tdq_load; 25451690c6c1SJeff Roberson /* 25463f872f85SJeff Roberson * Advance the insert index once for each tick to ensure that all 25473f872f85SJeff Roberson * threads get a chance to run. 25483f872f85SJeff Roberson */ 25493f872f85SJeff Roberson if (tdq->tdq_idx == tdq->tdq_ridx) { 25503f872f85SJeff Roberson tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS; 25513f872f85SJeff Roberson if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx])) 25523f872f85SJeff Roberson tdq->tdq_ridx = tdq->tdq_idx; 25533f872f85SJeff Roberson } 255493ccd6bfSKonstantin Belousov ts = td_get_sched(td); 25557295465eSAlexander Motin sched_pctcpu_update(ts, 1); 2556c3cccf95SJeff Roberson if ((td->td_pri_class & PRI_FIFO_BIT) || TD_IS_IDLETHREAD(td)) 2557a8949de2SJeff Roberson return; 2558c3cccf95SJeff Roberson 2559c9a8cba4SJohn Baldwin if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) { 2560a8949de2SJeff Roberson /* 2561fd0b8c78SJeff Roberson * We used a tick; charge it to the thread so 2562fd0b8c78SJeff Roberson * that we can compute our interactivity. 256315dc847eSJeff Roberson */ 2564c3cccf95SJeff Roberson td_get_sched(td)->ts_runtime += tickincr * cnt; 25658460a577SJohn Birrell sched_interact_update(td); 256673daf66fSJeff Roberson sched_priority(td); 2567fd0b8c78SJeff Roberson } 2568579895dfSAlexander Motin 256935e6168fSJeff Roberson /* 2570579895dfSAlexander Motin * Force a context switch if the current thread has used up a full 2571579895dfSAlexander Motin * time slice (default is 100ms). 
257235e6168fSJeff Roberson */ 2573c3cccf95SJeff Roberson ts->ts_slice += cnt; 2574c3cccf95SJeff Roberson if (ts->ts_slice >= tdq_slice(tdq)) { 25755e5c3873SJeff Roberson ts->ts_slice = 0; 25763d7f4117SAlexander Motin td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND; 257735e6168fSJeff Roberson } 2578579895dfSAlexander Motin } 257935e6168fSJeff Roberson 2580ccd0ec40SKonstantin Belousov u_int 2581ccd0ec40SKonstantin Belousov sched_estcpu(struct thread *td __unused) 2582ae7a6b38SJeff Roberson { 2583ae7a6b38SJeff Roberson 2584ccd0ec40SKonstantin Belousov return (0); 2585ae7a6b38SJeff Roberson } 2586ae7a6b38SJeff Roberson 2587ae7a6b38SJeff Roberson /* 2588ae7a6b38SJeff Roberson * Return whether the current CPU has runnable tasks. Used for in-kernel 2589ae7a6b38SJeff Roberson * cooperative idle threads. 2590ae7a6b38SJeff Roberson */ 259135e6168fSJeff Roberson int 259235e6168fSJeff Roberson sched_runnable(void) 259335e6168fSJeff Roberson { 2594ad1e7d28SJulian Elischer struct tdq *tdq; 2595b90816f1SJeff Roberson int load; 259635e6168fSJeff Roberson 2597b90816f1SJeff Roberson load = 1; 2598b90816f1SJeff Roberson 2599ad1e7d28SJulian Elischer tdq = TDQ_SELF(); 26003f741ca1SJeff Roberson if ((curthread->td_flags & TDF_IDLETD) != 0) { 2601d2ad694cSJeff Roberson if (tdq->tdq_load > 0) 26023f741ca1SJeff Roberson goto out; 26033f741ca1SJeff Roberson } else 2604d2ad694cSJeff Roberson if (tdq->tdq_load - 1 > 0) 2605b90816f1SJeff Roberson goto out; 2606b90816f1SJeff Roberson load = 0; 2607b90816f1SJeff Roberson out: 2608b90816f1SJeff Roberson return (load); 260935e6168fSJeff Roberson } 261035e6168fSJeff Roberson 2611ae7a6b38SJeff Roberson /* 2612ae7a6b38SJeff Roberson * Choose the highest priority thread to run. The thread is removed from 2613ef80894cSMark Johnston * the run-queue while running however the load remains. 2614ae7a6b38SJeff Roberson */ 26157a5e5e2aSJeff Roberson struct thread * 2616c9f25d8fSJeff Roberson sched_choose(void) 2617c9f25d8fSJeff Roberson { 26189727e637SJeff Roberson struct thread *td; 2619ae7a6b38SJeff Roberson struct tdq *tdq; 2620ae7a6b38SJeff Roberson 2621ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2622ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 26239727e637SJeff Roberson td = tdq_choose(tdq); 26246d3f74a1SMark Johnston if (td != NULL) { 26259727e637SJeff Roberson tdq_runq_rem(tdq, td); 26260502fe2eSJeff Roberson tdq->tdq_lowpri = td->td_priority; 26276d3f74a1SMark Johnston } else { 26280502fe2eSJeff Roberson tdq->tdq_lowpri = PRI_MAX_IDLE; 26296d3f74a1SMark Johnston td = PCPU_GET(idlethread); 26306d3f74a1SMark Johnston } 26316d3f74a1SMark Johnston tdq->tdq_curthread = td; 26326d3f74a1SMark Johnston return (td); 26337a5e5e2aSJeff Roberson } 26347a5e5e2aSJeff Roberson 2635ae7a6b38SJeff Roberson /* 2636*0927ff78SMark Johnston * Set owepreempt if the currently running thread has lower priority than "pri". 2637*0927ff78SMark Johnston * Preemption never happens directly in ULE, we always request it once we exit a 2638*0927ff78SMark Johnston * critical section. 
2639ae7a6b38SJeff Roberson */ 2640*0927ff78SMark Johnston static void 2641*0927ff78SMark Johnston sched_setpreempt(int pri) 26427a5e5e2aSJeff Roberson { 26437a5e5e2aSJeff Roberson struct thread *ctd; 26447a5e5e2aSJeff Roberson int cpri; 2645ff256d9cSJeff Roberson 26467a5e5e2aSJeff Roberson ctd = curthread; 2647*0927ff78SMark Johnston THREAD_LOCK_ASSERT(ctd, MA_OWNED); 2648*0927ff78SMark Johnston 26497a5e5e2aSJeff Roberson cpri = ctd->td_priority; 2650ff256d9cSJeff Roberson if (pri < cpri) 2651ff256d9cSJeff Roberson ctd->td_flags |= TDF_NEEDRESCHED; 2652879e0604SMateusz Guzik if (KERNEL_PANICKED() || pri >= cpri || cold || TD_IS_INHIBITED(ctd)) 2653ae7a6b38SJeff Roberson return; 2654ff256d9cSJeff Roberson if (!sched_shouldpreempt(pri, cpri, 0)) 2655ae7a6b38SJeff Roberson return; 26567a5e5e2aSJeff Roberson ctd->td_owepreempt = 1; 265735e6168fSJeff Roberson } 265835e6168fSJeff Roberson 2659ae7a6b38SJeff Roberson /* 266073daf66fSJeff Roberson * Add a thread to a thread queue. Select the appropriate runq and add the 266173daf66fSJeff Roberson * thread to it. This is the internal function called when the tdq is 266273daf66fSJeff Roberson * predetermined. 2663ae7a6b38SJeff Roberson */ 26646d3f74a1SMark Johnston static int 2665ae7a6b38SJeff Roberson tdq_add(struct tdq *tdq, struct thread *td, int flags) 266635e6168fSJeff Roberson { 26676d3f74a1SMark Johnston int lowpri; 2668c9f25d8fSJeff Roberson 2669ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 267061a74c5cSJeff Roberson THREAD_LOCK_BLOCKED_ASSERT(td, MA_OWNED); 26717a5e5e2aSJeff Roberson KASSERT((td->td_inhibitors == 0), 26727a5e5e2aSJeff Roberson ("sched_add: trying to run inhibited thread")); 26737a5e5e2aSJeff Roberson KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 26747a5e5e2aSJeff Roberson ("sched_add: bad thread state")); 2675b61ce5b0SJeff Roberson KASSERT(td->td_flags & TDF_INMEM, 2676b61ce5b0SJeff Roberson ("sched_add: thread swapped out")); 2677ae7a6b38SJeff Roberson 26786d3f74a1SMark Johnston lowpri = tdq->tdq_lowpri; 26796d3f74a1SMark Johnston if (td->td_priority < lowpri) 2680ae7a6b38SJeff Roberson tdq->tdq_lowpri = td->td_priority; 26819727e637SJeff Roberson tdq_runq_add(tdq, td, flags); 26829727e637SJeff Roberson tdq_load_add(tdq, td); 26836d3f74a1SMark Johnston return (lowpri); 2684ae7a6b38SJeff Roberson } 2685ae7a6b38SJeff Roberson 2686ae7a6b38SJeff Roberson /* 2687ae7a6b38SJeff Roberson * Select the target thread queue and add a thread to it. Request 2688ae7a6b38SJeff Roberson * preemption or IPI a remote processor if required. 268961a74c5cSJeff Roberson * 269061a74c5cSJeff Roberson * Requires the thread lock on entry, drops on exit. 
2691ae7a6b38SJeff Roberson */ 2692ae7a6b38SJeff Roberson void 2693ae7a6b38SJeff Roberson sched_add(struct thread *td, int flags) 2694ae7a6b38SJeff Roberson { 2695ae7a6b38SJeff Roberson struct tdq *tdq; 26967b8bfa0dSJeff Roberson #ifdef SMP 26976d3f74a1SMark Johnston int cpu, lowpri; 2698ae7a6b38SJeff Roberson #endif 26998f51ad55SJeff Roberson 27008f51ad55SJeff Roberson KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", 27018f51ad55SJeff Roberson "prio:%d", td->td_priority, KTR_ATTR_LINKED, 27028f51ad55SJeff Roberson sched_tdname(curthread)); 27038f51ad55SJeff Roberson KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup", 27048f51ad55SJeff Roberson KTR_ATTR_LINKED, sched_tdname(td)); 2705b3e9e682SRyan Stone SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, 2706b3e9e682SRyan Stone flags & SRQ_PREEMPTED); 2707ae7a6b38SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 2708ae7a6b38SJeff Roberson /* 2709ae7a6b38SJeff Roberson * Recalculate the priority before we select the target cpu or 2710ae7a6b38SJeff Roberson * run-queue. 2711ae7a6b38SJeff Roberson */ 2712ae7a6b38SJeff Roberson if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 2713ae7a6b38SJeff Roberson sched_priority(td); 2714ae7a6b38SJeff Roberson #ifdef SMP 2715ae7a6b38SJeff Roberson /* 2716ae7a6b38SJeff Roberson * Pick the destination cpu and if it isn't ours transfer to the 2717ae7a6b38SJeff Roberson * target cpu. 2718ae7a6b38SJeff Roberson */ 27199727e637SJeff Roberson cpu = sched_pickcpu(td, flags); 27209727e637SJeff Roberson tdq = sched_setcpu(td, cpu, flags); 27216d3f74a1SMark Johnston lowpri = tdq_add(tdq, td, flags); 272261a74c5cSJeff Roberson if (cpu != PCPU_GET(cpuid)) 27236d3f74a1SMark Johnston tdq_notify(tdq, lowpri); 272461a74c5cSJeff Roberson else if (!(flags & SRQ_YIELDING)) 2725*0927ff78SMark Johnston sched_setpreempt(td->td_priority); 2726ae7a6b38SJeff Roberson #else 2727ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2728ae7a6b38SJeff Roberson /* 2729ae7a6b38SJeff Roberson * Now that the thread is moving to the run-queue, set the lock 2730ae7a6b38SJeff Roberson * to the scheduler's lock. 2731ae7a6b38SJeff Roberson */ 2732e4894505SMark Johnston if (td->td_lock != TDQ_LOCKPTR(tdq)) { 2733e4894505SMark Johnston TDQ_LOCK(tdq); 273461a74c5cSJeff Roberson if ((flags & SRQ_HOLD) != 0) 273561a74c5cSJeff Roberson td->td_lock = TDQ_LOCKPTR(tdq); 273661a74c5cSJeff Roberson else 2737ae7a6b38SJeff Roberson thread_lock_set(td, TDQ_LOCKPTR(tdq)); 2738e4894505SMark Johnston } 27396d3f74a1SMark Johnston (void)tdq_add(tdq, td, flags); 2740ae7a6b38SJeff Roberson if (!(flags & SRQ_YIELDING)) 2741*0927ff78SMark Johnston sched_setpreempt(td->td_priority); 274261a74c5cSJeff Roberson #endif 274361a74c5cSJeff Roberson if (!(flags & SRQ_HOLDTD)) 274461a74c5cSJeff Roberson thread_unlock(td); 274535e6168fSJeff Roberson } 274635e6168fSJeff Roberson 2747ae7a6b38SJeff Roberson /* 2748ae7a6b38SJeff Roberson * Remove a thread from a run-queue without running it. This is used 2749ae7a6b38SJeff Roberson * when we're stealing a thread from a remote queue. Otherwise all threads 2750ae7a6b38SJeff Roberson * exit by calling sched_exit_thread() and sched_throw() themselves. 
2751ae7a6b38SJeff Roberson */ 275235e6168fSJeff Roberson void 27537cf90fb3SJeff Roberson sched_rem(struct thread *td) 275435e6168fSJeff Roberson { 2755ad1e7d28SJulian Elischer struct tdq *tdq; 27567cf90fb3SJeff Roberson 27578f51ad55SJeff Roberson KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem", 27588f51ad55SJeff Roberson "prio:%d", td->td_priority); 2759b3e9e682SRyan Stone SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL); 276093ccd6bfSKonstantin Belousov tdq = TDQ_CPU(td_get_sched(td)->ts_cpu); 2761ae7a6b38SJeff Roberson TDQ_LOCK_ASSERT(tdq, MA_OWNED); 2762ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 27637a5e5e2aSJeff Roberson KASSERT(TD_ON_RUNQ(td), 2764ad1e7d28SJulian Elischer ("sched_rem: thread not on run queue")); 27659727e637SJeff Roberson tdq_runq_rem(tdq, td); 27669727e637SJeff Roberson tdq_load_rem(tdq, td); 27677a5e5e2aSJeff Roberson TD_SET_CAN_RUN(td); 276862fa74d9SJeff Roberson if (td->td_priority == tdq->tdq_lowpri) 276962fa74d9SJeff Roberson tdq_setlowpri(tdq, NULL); 277035e6168fSJeff Roberson } 277135e6168fSJeff Roberson 2772ae7a6b38SJeff Roberson /* 2773ae7a6b38SJeff Roberson * Fetch cpu utilization information. Updates on demand. 2774ae7a6b38SJeff Roberson */ 277535e6168fSJeff Roberson fixpt_t 27767cf90fb3SJeff Roberson sched_pctcpu(struct thread *td) 277735e6168fSJeff Roberson { 277835e6168fSJeff Roberson fixpt_t pctcpu; 2779ad1e7d28SJulian Elischer struct td_sched *ts; 278035e6168fSJeff Roberson 278135e6168fSJeff Roberson pctcpu = 0; 278293ccd6bfSKonstantin Belousov ts = td_get_sched(td); 278335e6168fSJeff Roberson 27843da35a0aSJohn Baldwin THREAD_LOCK_ASSERT(td, MA_OWNED); 27857295465eSAlexander Motin sched_pctcpu_update(ts, TD_IS_RUNNING(td)); 2786ad1e7d28SJulian Elischer if (ts->ts_ticks) { 278735e6168fSJeff Roberson int rtick; 278835e6168fSJeff Roberson 278935e6168fSJeff Roberson /* How many rtick per second ? */ 2790e7d50326SJeff Roberson rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz); 2791e7d50326SJeff Roberson pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT; 279235e6168fSJeff Roberson } 279335e6168fSJeff Roberson 279435e6168fSJeff Roberson return (pctcpu); 279535e6168fSJeff Roberson } 279635e6168fSJeff Roberson 279762fa74d9SJeff Roberson /* 279862fa74d9SJeff Roberson * Enforce affinity settings for a thread. Called after adjustments to 279962fa74d9SJeff Roberson * cpumask. 280062fa74d9SJeff Roberson */ 2801885d51a3SJeff Roberson void 2802885d51a3SJeff Roberson sched_affinity(struct thread *td) 2803885d51a3SJeff Roberson { 280462fa74d9SJeff Roberson #ifdef SMP 280562fa74d9SJeff Roberson struct td_sched *ts; 280662fa74d9SJeff Roberson 280762fa74d9SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 280893ccd6bfSKonstantin Belousov ts = td_get_sched(td); 280962fa74d9SJeff Roberson if (THREAD_CAN_SCHED(td, ts->ts_cpu)) 281062fa74d9SJeff Roberson return; 281153a6c8b3SJeff Roberson if (TD_ON_RUNQ(td)) { 281253a6c8b3SJeff Roberson sched_rem(td); 2813d8d5f036SJeff Roberson sched_add(td, SRQ_BORING | SRQ_HOLDTD); 281453a6c8b3SJeff Roberson return; 281553a6c8b3SJeff Roberson } 281662fa74d9SJeff Roberson if (!TD_IS_RUNNING(td)) 281762fa74d9SJeff Roberson return; 281862fa74d9SJeff Roberson /* 28190f7a0ebdSMatthew D Fleming * Force a switch before returning to userspace. If the 28200f7a0ebdSMatthew D Fleming * target thread is not running locally send an ipi to force 28210f7a0ebdSMatthew D Fleming * the issue. 
282262fa74d9SJeff Roberson */ 2823a8103ae8SJohn Baldwin td->td_flags |= TDF_NEEDRESCHED; 28240f7a0ebdSMatthew D Fleming if (td != curthread) 28250f7a0ebdSMatthew D Fleming ipi_cpu(ts->ts_cpu, IPI_PREEMPT); 282662fa74d9SJeff Roberson #endif 2827885d51a3SJeff Roberson } 2828885d51a3SJeff Roberson 2829ae7a6b38SJeff Roberson /* 2830ae7a6b38SJeff Roberson * Bind a thread to a target cpu. 2831ae7a6b38SJeff Roberson */ 28329bacd788SJeff Roberson void 28339bacd788SJeff Roberson sched_bind(struct thread *td, int cpu) 28349bacd788SJeff Roberson { 2835ad1e7d28SJulian Elischer struct td_sched *ts; 28369bacd788SJeff Roberson 2837c47f202bSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); 28381d7830edSJohn Baldwin KASSERT(td == curthread, ("sched_bind: can only bind curthread")); 283993ccd6bfSKonstantin Belousov ts = td_get_sched(td); 28406b2f763fSJeff Roberson if (ts->ts_flags & TSF_BOUND) 2841c95d2db2SJeff Roberson sched_unbind(td); 28420f7a0ebdSMatthew D Fleming KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td)); 2843ad1e7d28SJulian Elischer ts->ts_flags |= TSF_BOUND; 28446b2f763fSJeff Roberson sched_pin(); 284580f86c9fSJeff Roberson if (PCPU_GET(cpuid) == cpu) 28469bacd788SJeff Roberson return; 28476b2f763fSJeff Roberson ts->ts_cpu = cpu; 28489bacd788SJeff Roberson /* When we return from mi_switch we'll be on the correct cpu. */ 2849686bcb5cSJeff Roberson mi_switch(SW_VOL); 2850686bcb5cSJeff Roberson thread_lock(td); 28519bacd788SJeff Roberson } 28529bacd788SJeff Roberson 2853ae7a6b38SJeff Roberson /* 2854ae7a6b38SJeff Roberson * Release a bound thread. 2855ae7a6b38SJeff Roberson */ 28569bacd788SJeff Roberson void 28579bacd788SJeff Roberson sched_unbind(struct thread *td) 28589bacd788SJeff Roberson { 2859e7d50326SJeff Roberson struct td_sched *ts; 2860e7d50326SJeff Roberson 28617b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 28621d7830edSJohn Baldwin KASSERT(td == curthread, ("sched_unbind: can only bind curthread")); 286393ccd6bfSKonstantin Belousov ts = td_get_sched(td); 28646b2f763fSJeff Roberson if ((ts->ts_flags & TSF_BOUND) == 0) 28656b2f763fSJeff Roberson return; 2866e7d50326SJeff Roberson ts->ts_flags &= ~TSF_BOUND; 2867e7d50326SJeff Roberson sched_unpin(); 28689bacd788SJeff Roberson } 28699bacd788SJeff Roberson 287035e6168fSJeff Roberson int 2871ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td) 2872ebccf1e3SJoseph Koshy { 28737b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 287493ccd6bfSKonstantin Belousov return (td_get_sched(td)->ts_flags & TSF_BOUND); 2875ebccf1e3SJoseph Koshy } 2876ebccf1e3SJoseph Koshy 2877ae7a6b38SJeff Roberson /* 2878ae7a6b38SJeff Roberson * Basic yield call. 2879ae7a6b38SJeff Roberson */ 288036ec198bSDavid Xu void 288136ec198bSDavid Xu sched_relinquish(struct thread *td) 288236ec198bSDavid Xu { 28837b20fb19SJeff Roberson thread_lock(td); 2884686bcb5cSJeff Roberson mi_switch(SW_VOL | SWT_RELINQUISH); 288536ec198bSDavid Xu } 288636ec198bSDavid Xu 2887ae7a6b38SJeff Roberson /* 2888ae7a6b38SJeff Roberson * Return the total system load. 
2889ae7a6b38SJeff Roberson */ 2890ebccf1e3SJoseph Koshy int 289133916c36SJeff Roberson sched_load(void) 289233916c36SJeff Roberson { 289333916c36SJeff Roberson #ifdef SMP 289433916c36SJeff Roberson int total; 289533916c36SJeff Roberson int i; 289633916c36SJeff Roberson 289733916c36SJeff Roberson total = 0; 28983aa6d94eSJohn Baldwin CPU_FOREACH(i) 289962fa74d9SJeff Roberson total += TDQ_CPU(i)->tdq_sysload; 290033916c36SJeff Roberson return (total); 290133916c36SJeff Roberson #else 2902d2ad694cSJeff Roberson return (TDQ_SELF()->tdq_sysload); 290333916c36SJeff Roberson #endif 290433916c36SJeff Roberson } 290533916c36SJeff Roberson 290633916c36SJeff Roberson int 290735e6168fSJeff Roberson sched_sizeof_proc(void) 290835e6168fSJeff Roberson { 290935e6168fSJeff Roberson return (sizeof(struct proc)); 291035e6168fSJeff Roberson } 291135e6168fSJeff Roberson 291235e6168fSJeff Roberson int 291335e6168fSJeff Roberson sched_sizeof_thread(void) 291435e6168fSJeff Roberson { 291535e6168fSJeff Roberson return (sizeof(struct thread) + sizeof(struct td_sched)); 291635e6168fSJeff Roberson } 2917b41f1452SDavid Xu 291809c8a4ccSJeff Roberson #ifdef SMP 291909c8a4ccSJeff Roberson #define TDQ_IDLESPIN(tdq) \ 292009c8a4ccSJeff Roberson ((tdq)->tdq_cg != NULL && ((tdq)->tdq_cg->cg_flags & CG_FLAG_THREAD) == 0) 292109c8a4ccSJeff Roberson #else 292209c8a4ccSJeff Roberson #define TDQ_IDLESPIN(tdq) 1 292309c8a4ccSJeff Roberson #endif 292409c8a4ccSJeff Roberson 29257a5e5e2aSJeff Roberson /* 29267a5e5e2aSJeff Roberson * The actual idle process. 29277a5e5e2aSJeff Roberson */ 29287a5e5e2aSJeff Roberson void 29297a5e5e2aSJeff Roberson sched_idletd(void *dummy) 29307a5e5e2aSJeff Roberson { 29317a5e5e2aSJeff Roberson struct thread *td; 2932ae7a6b38SJeff Roberson struct tdq *tdq; 29332c27cb3aSAlexander Motin int oldswitchcnt, switchcnt; 29341690c6c1SJeff Roberson int i; 29357a5e5e2aSJeff Roberson 29367b55ab05SJeff Roberson mtx_assert(&Giant, MA_NOTOWNED); 29377a5e5e2aSJeff Roberson td = curthread; 2938ae7a6b38SJeff Roberson tdq = TDQ_SELF(); 2939ba96d2d8SJohn Baldwin THREAD_NO_SLEEPING(); 29402c27cb3aSAlexander Motin oldswitchcnt = -1; 2941ae7a6b38SJeff Roberson for (;;) { 29422c27cb3aSAlexander Motin if (tdq->tdq_load) { 29432c27cb3aSAlexander Motin thread_lock(td); 2944686bcb5cSJeff Roberson mi_switch(SW_VOL | SWT_IDLE); 29452c27cb3aSAlexander Motin } 29462c27cb3aSAlexander Motin switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; 2947ae7a6b38SJeff Roberson #ifdef SMP 294897e9382dSDon Lewis if (always_steal || switchcnt != oldswitchcnt) { 29492c27cb3aSAlexander Motin oldswitchcnt = switchcnt; 29501690c6c1SJeff Roberson if (tdq_idled(tdq) == 0) 29511690c6c1SJeff Roberson continue; 29522c27cb3aSAlexander Motin } 29531690c6c1SJeff Roberson switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; 29542fd4047fSAlexander Motin #else 29552fd4047fSAlexander Motin oldswitchcnt = switchcnt; 29562fd4047fSAlexander Motin #endif 29571690c6c1SJeff Roberson /* 29581690c6c1SJeff Roberson * If we're switching very frequently, spin while checking 29591690c6c1SJeff Roberson * for load rather than entering a low power state that 29607b55ab05SJeff Roberson * may require an IPI. However, don't do any busy 29617b55ab05SJeff Roberson * loops while on SMT machines as this simply steals 29627b55ab05SJeff Roberson * cycles from cores doing useful work. 
29631690c6c1SJeff Roberson */ 296409c8a4ccSJeff Roberson if (TDQ_IDLESPIN(tdq) && switchcnt > sched_idlespinthresh) { 29651690c6c1SJeff Roberson for (i = 0; i < sched_idlespins; i++) { 29661690c6c1SJeff Roberson if (tdq->tdq_load) 29671690c6c1SJeff Roberson break; 29681690c6c1SJeff Roberson cpu_spinwait(); 29691690c6c1SJeff Roberson } 29701690c6c1SJeff Roberson } 29712c27cb3aSAlexander Motin 29722c27cb3aSAlexander Motin /* If there was context switch during spin, restart it. */ 29736c47aaaeSJeff Roberson switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; 29742c27cb3aSAlexander Motin if (tdq->tdq_load != 0 || switchcnt != oldswitchcnt) 29752c27cb3aSAlexander Motin continue; 29762c27cb3aSAlexander Motin 29772c27cb3aSAlexander Motin /* Run main MD idle handler. */ 29789f9ad565SAlexander Motin tdq->tdq_cpu_idle = 1; 297979654969SAlexander Motin /* 29806d3f74a1SMark Johnston * Make sure that the tdq_cpu_idle update is globally visible 29816d3f74a1SMark Johnston * before cpu_idle() reads tdq_load. The order is important 29826d3f74a1SMark Johnston * to avoid races with tdq_notify(). 298379654969SAlexander Motin */ 2984e8677f38SKonstantin Belousov atomic_thread_fence_seq_cst(); 298597e9382dSDon Lewis /* 298697e9382dSDon Lewis * Checking for again after the fence picks up assigned 298797e9382dSDon Lewis * threads often enough to make it worthwhile to do so in 298897e9382dSDon Lewis * order to avoid calling cpu_idle(). 298997e9382dSDon Lewis */ 299097e9382dSDon Lewis if (tdq->tdq_load != 0) { 299197e9382dSDon Lewis tdq->tdq_cpu_idle = 0; 299297e9382dSDon Lewis continue; 299397e9382dSDon Lewis } 29942c27cb3aSAlexander Motin cpu_idle(switchcnt * 4 > sched_idlespinthresh); 29959f9ad565SAlexander Motin tdq->tdq_cpu_idle = 0; 29962c27cb3aSAlexander Motin 29972c27cb3aSAlexander Motin /* 29982c27cb3aSAlexander Motin * Account thread-less hardware interrupts and 29992c27cb3aSAlexander Motin * other wakeup reasons equal to context switches. 30002c27cb3aSAlexander Motin */ 30012c27cb3aSAlexander Motin switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt; 30022c27cb3aSAlexander Motin if (switchcnt != oldswitchcnt) 30032c27cb3aSAlexander Motin continue; 30042c27cb3aSAlexander Motin tdq->tdq_switchcnt++; 30052c27cb3aSAlexander Motin oldswitchcnt++; 3006ae7a6b38SJeff Roberson } 3007b41f1452SDavid Xu } 3008e7d50326SJeff Roberson 30097b20fb19SJeff Roberson /* 30106a8ea6d1SKyle Evans * sched_throw_grab() chooses a thread from the queue to switch to 30116a8ea6d1SKyle Evans * next. It returns with the tdq lock dropped in a spinlock section to 30126a8ea6d1SKyle Evans * keep interrupts disabled until the CPU is running in a proper threaded 30136a8ea6d1SKyle Evans * context. 30146a8ea6d1SKyle Evans */ 30156a8ea6d1SKyle Evans static struct thread * 30166a8ea6d1SKyle Evans sched_throw_grab(struct tdq *tdq) 30176a8ea6d1SKyle Evans { 30186a8ea6d1SKyle Evans struct thread *newtd; 30196a8ea6d1SKyle Evans 30206a8ea6d1SKyle Evans newtd = choosethread(); 30216a8ea6d1SKyle Evans spinlock_enter(); 30226a8ea6d1SKyle Evans TDQ_UNLOCK(tdq); 30236a8ea6d1SKyle Evans KASSERT(curthread->td_md.md_spinlock_count == 1, 30246a8ea6d1SKyle Evans ("invalid count %d", curthread->td_md.md_spinlock_count)); 30256a8ea6d1SKyle Evans return (newtd); 30266a8ea6d1SKyle Evans } 30276a8ea6d1SKyle Evans 30286a8ea6d1SKyle Evans /* 30296a8ea6d1SKyle Evans * A CPU is entering for the first time. 
30306a8ea6d1SKyle Evans */ 30316a8ea6d1SKyle Evans void 30326a8ea6d1SKyle Evans sched_ap_entry(void) 30336a8ea6d1SKyle Evans { 30346a8ea6d1SKyle Evans struct thread *newtd; 30356a8ea6d1SKyle Evans struct tdq *tdq; 30366a8ea6d1SKyle Evans 30376a8ea6d1SKyle Evans tdq = TDQ_SELF(); 30386a8ea6d1SKyle Evans 30396a8ea6d1SKyle Evans /* This should have been setup in schedinit_ap(). */ 30406a8ea6d1SKyle Evans THREAD_LOCKPTR_ASSERT(curthread, TDQ_LOCKPTR(tdq)); 30416a8ea6d1SKyle Evans 30426a8ea6d1SKyle Evans TDQ_LOCK(tdq); 30436a8ea6d1SKyle Evans /* Correct spinlock nesting. */ 30446a8ea6d1SKyle Evans spinlock_exit(); 30456a8ea6d1SKyle Evans PCPU_SET(switchtime, cpu_ticks()); 30466a8ea6d1SKyle Evans PCPU_SET(switchticks, ticks); 30476a8ea6d1SKyle Evans 30486a8ea6d1SKyle Evans newtd = sched_throw_grab(tdq); 30496a8ea6d1SKyle Evans 30506a8ea6d1SKyle Evans /* doesn't return */ 30516a8ea6d1SKyle Evans cpu_throw(NULL, newtd); 30526a8ea6d1SKyle Evans } 30536a8ea6d1SKyle Evans 30546a8ea6d1SKyle Evans /* 30556a8ea6d1SKyle Evans * A thread is exiting. 30567b20fb19SJeff Roberson */ 30577b20fb19SJeff Roberson void 30587b20fb19SJeff Roberson sched_throw(struct thread *td) 30597b20fb19SJeff Roberson { 306059c68134SJeff Roberson struct thread *newtd; 3061ae7a6b38SJeff Roberson struct tdq *tdq; 3062ae7a6b38SJeff Roberson 3063018ff686SJeff Roberson tdq = TDQ_SELF(); 30646a8ea6d1SKyle Evans 30656a8ea6d1SKyle Evans MPASS(td != NULL); 3066686bcb5cSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 3067686bcb5cSJeff Roberson THREAD_LOCKPTR_ASSERT(td, TDQ_LOCKPTR(tdq)); 30686a8ea6d1SKyle Evans 30699727e637SJeff Roberson tdq_load_rem(tdq, td); 307092de34dfSJohn Baldwin td->td_lastcpu = td->td_oncpu; 307192de34dfSJohn Baldwin td->td_oncpu = NOCPU; 30721eb13fceSJeff Roberson thread_lock_block(td); 30736a8ea6d1SKyle Evans 30746a8ea6d1SKyle Evans newtd = sched_throw_grab(tdq); 30756a8ea6d1SKyle Evans 30761eb13fceSJeff Roberson /* doesn't return */ 30771eb13fceSJeff Roberson cpu_switch(td, newtd, TDQ_LOCKPTR(tdq)); 30787b20fb19SJeff Roberson } 30797b20fb19SJeff Roberson 3080ae7a6b38SJeff Roberson /* 3081ae7a6b38SJeff Roberson * This is called from fork_exit(). Just acquire the correct locks and 3082ae7a6b38SJeff Roberson * let fork do the rest of the work. 3083ae7a6b38SJeff Roberson */ 30847b20fb19SJeff Roberson void 3085fe54587fSJeff Roberson sched_fork_exit(struct thread *td) 30867b20fb19SJeff Roberson { 3087ae7a6b38SJeff Roberson struct tdq *tdq; 3088ae7a6b38SJeff Roberson int cpuid; 30897b20fb19SJeff Roberson 30907b20fb19SJeff Roberson /* 30917b20fb19SJeff Roberson * Finish setting up thread glue so that it begins execution in a 3092ae7a6b38SJeff Roberson * non-nested critical section with the scheduler lock held. 
30937b20fb19SJeff Roberson */ 3094686bcb5cSJeff Roberson KASSERT(curthread->td_md.md_spinlock_count == 1, 3095686bcb5cSJeff Roberson ("invalid count %d", curthread->td_md.md_spinlock_count)); 3096ae7a6b38SJeff Roberson cpuid = PCPU_GET(cpuid); 3097018ff686SJeff Roberson tdq = TDQ_SELF(); 3098686bcb5cSJeff Roberson TDQ_LOCK(tdq); 3099686bcb5cSJeff Roberson spinlock_exit(); 3100ae7a6b38SJeff Roberson MPASS(td->td_lock == TDQ_LOCKPTR(tdq)); 3101ae7a6b38SJeff Roberson td->td_oncpu = cpuid; 310228ef18b8SAndriy Gapon KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running", 310328ef18b8SAndriy Gapon "prio:%d", td->td_priority); 310428ef18b8SAndriy Gapon SDT_PROBE0(sched, , , on__cpu); 31057b20fb19SJeff Roberson } 31067b20fb19SJeff Roberson 31078f51ad55SJeff Roberson /* 310815b5c347SGordon Bergling * Create on first use to catch odd startup conditions. 31098f51ad55SJeff Roberson */ 31108f51ad55SJeff Roberson char * 31118f51ad55SJeff Roberson sched_tdname(struct thread *td) 31128f51ad55SJeff Roberson { 31138f51ad55SJeff Roberson #ifdef KTR 31148f51ad55SJeff Roberson struct td_sched *ts; 31158f51ad55SJeff Roberson 311693ccd6bfSKonstantin Belousov ts = td_get_sched(td); 31178f51ad55SJeff Roberson if (ts->ts_name[0] == '\0') 31188f51ad55SJeff Roberson snprintf(ts->ts_name, sizeof(ts->ts_name), 31198f51ad55SJeff Roberson "%s tid %d", td->td_name, td->td_tid); 31208f51ad55SJeff Roberson return (ts->ts_name); 31218f51ad55SJeff Roberson #else 31228f51ad55SJeff Roberson return (td->td_name); 31238f51ad55SJeff Roberson #endif 31248f51ad55SJeff Roberson } 31258f51ad55SJeff Roberson 312644ad5475SJohn Baldwin #ifdef KTR 312744ad5475SJohn Baldwin void 312844ad5475SJohn Baldwin sched_clear_tdname(struct thread *td) 312944ad5475SJohn Baldwin { 313044ad5475SJohn Baldwin struct td_sched *ts; 313144ad5475SJohn Baldwin 313293ccd6bfSKonstantin Belousov ts = td_get_sched(td); 313344ad5475SJohn Baldwin ts->ts_name[0] = '\0'; 313444ad5475SJohn Baldwin } 313544ad5475SJohn Baldwin #endif 313644ad5475SJohn Baldwin 313707095abfSIvan Voras #ifdef SMP 313807095abfSIvan Voras 313907095abfSIvan Voras /* 314007095abfSIvan Voras * Build the CPU topology dump string. Is recursively called to collect 314107095abfSIvan Voras * the topology tree. 
314207095abfSIvan Voras */ 314307095abfSIvan Voras static int 314407095abfSIvan Voras sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg, 314507095abfSIvan Voras int indent) 314607095abfSIvan Voras { 314771a19bdcSAttilio Rao char cpusetbuf[CPUSETBUFSIZ]; 314807095abfSIvan Voras int i, first; 314907095abfSIvan Voras 315007095abfSIvan Voras sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent, 315119b8a6dbSAndriy Gapon "", 1 + indent / 2, cg->cg_level); 315271a19bdcSAttilio Rao sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "", 315371a19bdcSAttilio Rao cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask)); 315407095abfSIvan Voras first = TRUE; 3155aefe0a8cSAlexander Motin for (i = cg->cg_first; i <= cg->cg_last; i++) { 315671a19bdcSAttilio Rao if (CPU_ISSET(i, &cg->cg_mask)) { 315707095abfSIvan Voras if (!first) 315807095abfSIvan Voras sbuf_printf(sb, ", "); 315907095abfSIvan Voras else 316007095abfSIvan Voras first = FALSE; 316107095abfSIvan Voras sbuf_printf(sb, "%d", i); 316207095abfSIvan Voras } 316307095abfSIvan Voras } 316407095abfSIvan Voras sbuf_printf(sb, "</cpu>\n"); 316507095abfSIvan Voras 316607095abfSIvan Voras if (cg->cg_flags != 0) { 3167611daf7eSIvan Voras sbuf_printf(sb, "%*s <flags>", indent, ""); 316807095abfSIvan Voras if ((cg->cg_flags & CG_FLAG_HTT) != 0) 31695368befbSIvan Voras sbuf_printf(sb, "<flag name=\"HTT\">HTT group</flag>"); 3170a401f2d0SIvan Voras if ((cg->cg_flags & CG_FLAG_THREAD) != 0) 3171a401f2d0SIvan Voras sbuf_printf(sb, "<flag name=\"THREAD\">THREAD group</flag>"); 31727b55ab05SJeff Roberson if ((cg->cg_flags & CG_FLAG_SMT) != 0) 3173a401f2d0SIvan Voras sbuf_printf(sb, "<flag name=\"SMT\">SMT group</flag>"); 3174ef50d5fbSAlexander Motin if ((cg->cg_flags & CG_FLAG_NODE) != 0) 3175ef50d5fbSAlexander Motin sbuf_printf(sb, "<flag name=\"NODE\">NUMA node</flag>"); 317607095abfSIvan Voras sbuf_printf(sb, "</flags>\n"); 3177611daf7eSIvan Voras } 317807095abfSIvan Voras 317907095abfSIvan Voras if (cg->cg_children > 0) { 318007095abfSIvan Voras sbuf_printf(sb, "%*s <children>\n", indent, ""); 318107095abfSIvan Voras for (i = 0; i < cg->cg_children; i++) 318207095abfSIvan Voras sysctl_kern_sched_topology_spec_internal(sb, 318307095abfSIvan Voras &cg->cg_child[i], indent+2); 318407095abfSIvan Voras sbuf_printf(sb, "%*s </children>\n", indent, ""); 318507095abfSIvan Voras } 318607095abfSIvan Voras sbuf_printf(sb, "%*s</group>\n", indent, ""); 318707095abfSIvan Voras return (0); 318807095abfSIvan Voras } 318907095abfSIvan Voras 319007095abfSIvan Voras /* 319107095abfSIvan Voras * Sysctl handler for retrieving topology dump. It's a wrapper for 319207095abfSIvan Voras * the recursive sysctl_kern_smp_topology_spec_internal(). 
319307095abfSIvan Voras */ 319407095abfSIvan Voras static int 319507095abfSIvan Voras sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS) 319607095abfSIvan Voras { 319707095abfSIvan Voras struct sbuf *topo; 319807095abfSIvan Voras int err; 319907095abfSIvan Voras 320007095abfSIvan Voras KASSERT(cpu_top != NULL, ("cpu_top isn't initialized")); 320107095abfSIvan Voras 3202b97fa22cSIan Lepore topo = sbuf_new_for_sysctl(NULL, NULL, 512, req); 320307095abfSIvan Voras if (topo == NULL) 320407095abfSIvan Voras return (ENOMEM); 320507095abfSIvan Voras 320607095abfSIvan Voras sbuf_printf(topo, "<groups>\n"); 320707095abfSIvan Voras err = sysctl_kern_sched_topology_spec_internal(topo, cpu_top, 1); 320807095abfSIvan Voras sbuf_printf(topo, "</groups>\n"); 320907095abfSIvan Voras 321007095abfSIvan Voras if (err == 0) { 3211b97fa22cSIan Lepore err = sbuf_finish(topo); 321207095abfSIvan Voras } 321307095abfSIvan Voras sbuf_delete(topo); 321407095abfSIvan Voras return (err); 321507095abfSIvan Voras } 3216b67cc292SDavid Xu 321707095abfSIvan Voras #endif 321807095abfSIvan Voras 3219579895dfSAlexander Motin static int 3220579895dfSAlexander Motin sysctl_kern_quantum(SYSCTL_HANDLER_ARGS) 3221579895dfSAlexander Motin { 3222579895dfSAlexander Motin int error, new_val, period; 3223579895dfSAlexander Motin 3224579895dfSAlexander Motin period = 1000000 / realstathz; 3225579895dfSAlexander Motin new_val = period * sched_slice; 3226579895dfSAlexander Motin error = sysctl_handle_int(oidp, &new_val, 0, req); 3227579895dfSAlexander Motin if (error != 0 || req->newptr == NULL) 3228579895dfSAlexander Motin return (error); 3229579895dfSAlexander Motin if (new_val <= 0) 3230579895dfSAlexander Motin return (EINVAL); 323137f4e025SAlexander Motin sched_slice = imax(1, (new_val + period / 2) / period); 32325e5c3873SJeff Roberson sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR; 323337f4e025SAlexander Motin hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / 323437f4e025SAlexander Motin realstathz); 3235579895dfSAlexander Motin return (0); 3236579895dfSAlexander Motin } 3237579895dfSAlexander Motin 32387029da5cSPawel Biernacki SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 32397029da5cSPawel Biernacki "Scheduler"); 3240ae7a6b38SJeff Roberson SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0, 3241e7d50326SJeff Roberson "Scheduler name"); 32427029da5cSPawel Biernacki SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, 32437029da5cSPawel Biernacki CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0, 32447029da5cSPawel Biernacki sysctl_kern_quantum, "I", 324537f4e025SAlexander Motin "Quantum for timeshare threads in microseconds"); 3246ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, 324737f4e025SAlexander Motin "Quantum for timeshare threads in stathz ticks"); 32481c119e17SAlexander Motin SYSCTL_UINT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, 3249ae7a6b38SJeff Roberson "Interactivity score threshold"); 325037f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, 325137f4e025SAlexander Motin &preempt_thresh, 0, 325237f4e025SAlexander Motin "Maximal (lowest) priority for preemption"); 325337f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 0, 325437f4e025SAlexander Motin "Assign static kernel priorities to sleeping threads"); 325537f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins, 0, 
325637f4e025SAlexander Motin "Number of times idle thread will spin waiting for new work"); 325737f4e025SAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW, 325837f4e025SAlexander Motin &sched_idlespinthresh, 0, 325937f4e025SAlexander Motin "Threshold before we will permit idle thread spinning"); 32607b8bfa0dSJeff Roberson #ifdef SMP 3261ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0, 3262ae7a6b38SJeff Roberson "Number of hz ticks to keep thread affinity for"); 3263ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, 3264ae7a6b38SJeff Roberson "Enables the long-term load balancer"); 32657fcf154aSJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW, 32667fcf154aSJeff Roberson &balance_interval, 0, 3267579895dfSAlexander Motin "Average period in stathz ticks to run the long-term balancer"); 3268ae7a6b38SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0, 3269ae7a6b38SJeff Roberson "Attempts to steal work from other cores before idling"); 327028994a58SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0, 327137f4e025SAlexander Motin "Minimum load on remote CPU before we'll steal"); 327297e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, trysteal_limit, CTLFLAG_RW, &trysteal_limit, 327397e9382dSDon Lewis 0, "Topological distance limit for stealing threads in sched_switch()"); 327497e9382dSDon Lewis SYSCTL_INT(_kern_sched, OID_AUTO, always_steal, CTLFLAG_RW, &always_steal, 0, 327597e9382dSDon Lewis "Always run the stealer from the idle thread"); 327607095abfSIvan Voras SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING | 3277c69a1a50SMateusz Guzik CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A", 327807095abfSIvan Voras "XML dump of detected CPU topology"); 32797b8bfa0dSJeff Roberson #endif 3280e7d50326SJeff Roberson 328154b0e65fSJeff Roberson /* ps compat. All cpu percentages from ULE are weighted. */ 3282a5423ea3SJeff Roberson static int ccpu = 0; 3283b05ca429SPawel Biernacki SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, 3284b05ca429SPawel Biernacki "Decay factor used for updating %CPU in 4BSD scheduler"); 3285
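/*
 * Editor's illustrative sketch -- not part of sched_ule.c. It shows how the
 * kern.sched.quantum knob declared above behaves from userland: the
 * sysctl_kern_quantum() handler converts between microseconds and stathz
 * ticks using period = 1000000 / realstathz, reporting period * sched_slice
 * on reads and storing imax(1, (usec + period / 2) / period) on writes, so
 * any value written is rounded to a whole number of stathz ticks. Assumes a
 * FreeBSD userland and the standard sysctlbyname(3) interface; the 20 ms
 * value below is an arbitrary example, and writing requires privilege.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int quantum;
	size_t len = sizeof(quantum);

	/* Read the current timeshare quantum, reported in microseconds. */
	if (sysctlbyname("kern.sched.quantum", &quantum, &len, NULL, 0) != 0)
		err(1, "sysctlbyname(kern.sched.quantum)");
	printf("timeshare quantum: %d us\n", quantum);

	/*
	 * Request a 20 ms quantum; the kernel rounds this to stathz ticks,
	 * so reading it back may return a slightly different value.
	 */
	quantum = 20000;
	if (sysctlbyname("kern.sched.quantum", NULL, NULL, &quantum,
	    sizeof(quantum)) != 0)
		warn("setting kern.sched.quantum (requires root)");
	return (0);
}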