1b43179fbSJeff Roberson /*- 2b43179fbSJeff Roberson * Copyright (c) 1982, 1986, 1990, 1991, 1993 3b43179fbSJeff Roberson * The Regents of the University of California. All rights reserved. 4b43179fbSJeff Roberson * (c) UNIX System Laboratories, Inc. 5b43179fbSJeff Roberson * All or some portions of this file are derived from material licensed 6b43179fbSJeff Roberson * to the University of California by American Telephone and Telegraph 7b43179fbSJeff Roberson * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8b43179fbSJeff Roberson * the permission of UNIX System Laboratories, Inc. 9b43179fbSJeff Roberson * 10b43179fbSJeff Roberson * Redistribution and use in source and binary forms, with or without 11b43179fbSJeff Roberson * modification, are permitted provided that the following conditions 12b43179fbSJeff Roberson * are met: 13b43179fbSJeff Roberson * 1. Redistributions of source code must retain the above copyright 14b43179fbSJeff Roberson * notice, this list of conditions and the following disclaimer. 15b43179fbSJeff Roberson * 2. Redistributions in binary form must reproduce the above copyright 16b43179fbSJeff Roberson * notice, this list of conditions and the following disclaimer in the 17b43179fbSJeff Roberson * documentation and/or other materials provided with the distribution. 18b43179fbSJeff Roberson * 4. Neither the name of the University nor the names of its contributors 19b43179fbSJeff Roberson * may be used to endorse or promote products derived from this software 20b43179fbSJeff Roberson * without specific prior written permission. 21b43179fbSJeff Roberson * 22b43179fbSJeff Roberson * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23b43179fbSJeff Roberson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24b43179fbSJeff Roberson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25b43179fbSJeff Roberson * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26b43179fbSJeff Roberson * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27b43179fbSJeff Roberson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28b43179fbSJeff Roberson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29b43179fbSJeff Roberson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30b43179fbSJeff Roberson * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31b43179fbSJeff Roberson * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32b43179fbSJeff Roberson * SUCH DAMAGE. 33b43179fbSJeff Roberson */ 34b43179fbSJeff Roberson 35677b542eSDavid E. O'Brien #include <sys/cdefs.h> 36677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 37677b542eSDavid E. O'Brien 384da0d332SPeter Wemm #include "opt_hwpmc_hooks.h" 39a564bfc7SJeff Roberson #include "opt_sched.h" 404da0d332SPeter Wemm 41b43179fbSJeff Roberson #include <sys/param.h> 42b43179fbSJeff Roberson #include <sys/systm.h> 43f5a3ef99SMarcel Moolenaar #include <sys/cpuset.h> 44b43179fbSJeff Roberson #include <sys/kernel.h> 45b43179fbSJeff Roberson #include <sys/ktr.h> 46b43179fbSJeff Roberson #include <sys/lock.h> 47c55bbb6cSJohn Baldwin #include <sys/kthread.h> 48b43179fbSJeff Roberson #include <sys/mutex.h> 49b43179fbSJeff Roberson #include <sys/proc.h> 50b43179fbSJeff Roberson #include <sys/resourcevar.h> 51b43179fbSJeff Roberson #include <sys/sched.h> 52b3e9e682SRyan Stone #include <sys/sdt.h> 53b43179fbSJeff Roberson #include <sys/smp.h> 54b43179fbSJeff Roberson #include <sys/sysctl.h> 55b43179fbSJeff Roberson #include <sys/sx.h> 56f5c157d9SJohn Baldwin #include <sys/turnstile.h> 573db720fdSDavid Xu #include <sys/umtx.h> 582e4db89cSDavid E. O'Brien #include <machine/pcb.h> 59293968d8SJulian Elischer #include <machine/smp.h> 60b43179fbSJeff Roberson 61ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 62ebccf1e3SJoseph Koshy #include <sys/pmckern.h> 63ebccf1e3SJoseph Koshy #endif 64ebccf1e3SJoseph Koshy 656f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS 666f5f25e5SJohn Birrell #include <sys/dtrace_bsd.h> 676f5f25e5SJohn Birrell int dtrace_vtime_active; 686f5f25e5SJohn Birrell dtrace_vtime_switch_func_t dtrace_vtime_switch_func; 696f5f25e5SJohn Birrell #endif 706f5f25e5SJohn Birrell 7106439a04SJeff Roberson /* 7206439a04SJeff Roberson * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in 7306439a04SJeff Roberson * the range 100-256 Hz (approximately). 7406439a04SJeff Roberson */ 7506439a04SJeff Roberson #define ESTCPULIM(e) \ 7606439a04SJeff Roberson min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \ 7706439a04SJeff Roberson RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1) 78b698380fSBruce Evans #ifdef SMP 79b698380fSBruce Evans #define INVERSE_ESTCPU_WEIGHT (8 * smp_cpus) 80b698380fSBruce Evans #else 8106439a04SJeff Roberson #define INVERSE_ESTCPU_WEIGHT 8 /* 1 / (priorities per estcpu level). */ 82b698380fSBruce Evans #endif 8306439a04SJeff Roberson #define NICE_WEIGHT 1 /* Priorities per nice level. */ 8406439a04SJeff Roberson 850d2cf837SJeff Roberson #define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX))) 868f51ad55SJeff Roberson 878460a577SJohn Birrell /* 888460a577SJohn Birrell * The schedulable entity that runs a context. 89ad1e7d28SJulian Elischer * This is an extension to the thread structure and is tailored to 90ad1e7d28SJulian Elischer * the requirements of this scheduler 918460a577SJohn Birrell */ 92ad1e7d28SJulian Elischer struct td_sched { 93ad1e7d28SJulian Elischer fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */ 94ad1e7d28SJulian Elischer int ts_cpticks; /* (j) Ticks of cpu time. */ 9554b0e65fSJeff Roberson int ts_slptime; /* (j) Seconds !RUNNING. */ 9648317e9eSAlexander Motin int ts_slice; /* Remaining part of time slice. */ 97f200843bSJohn Baldwin int ts_flags; 98ad1e7d28SJulian Elischer struct runq *ts_runq; /* runq the thread is currently on */ 998f51ad55SJeff Roberson #ifdef KTR 1008f51ad55SJeff Roberson char ts_name[TS_NAME_LEN]; 1018f51ad55SJeff Roberson #endif 102bcb06d59SJeff Roberson }; 103ed062c8dSJulian Elischer 104ed062c8dSJulian Elischer /* flags kept in td_flags */ 105ad1e7d28SJulian Elischer #define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */ 1069727e637SJeff Roberson #define TDF_BOUND TDF_SCHED1 /* Bound to one CPU. */ 1073d7f4117SAlexander Motin #define TDF_SLICEEND TDF_SCHED2 /* Thread time slice is over. */ 108bcb06d59SJeff Roberson 109f200843bSJohn Baldwin /* flags kept in ts_flags */ 110f200843bSJohn Baldwin #define TSF_AFFINITY 0x0001 /* Has a non-"full" CPU set. */ 111f200843bSJohn Baldwin 112ad1e7d28SJulian Elischer #define SKE_RUNQ_PCPU(ts) \ 113ad1e7d28SJulian Elischer ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq) 114e17c57b1SJeff Roberson 115f200843bSJohn Baldwin #define THREAD_CAN_SCHED(td, cpu) \ 116f200843bSJohn Baldwin CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask) 117f200843bSJohn Baldwin 118ad1e7d28SJulian Elischer static struct td_sched td_sched0; 1196ea38de8SJeff Roberson struct mtx sched_lock; 120b43179fbSJeff Roberson 121579895dfSAlexander Motin static int realstathz = 127; /* stathz is sometimes 0 and run off of hz. */ 122ca59f152SJeff Roberson static int sched_tdcnt; /* Total runnable threads in the system. */ 123579895dfSAlexander Motin static int sched_slice = 12; /* Thread run time before rescheduling. */ 124b43179fbSJeff Roberson 125e17c57b1SJeff Roberson static void setup_runqs(void); 126c55bbb6cSJohn Baldwin static void schedcpu(void); 127e17c57b1SJeff Roberson static void schedcpu_thread(void); 128f5c157d9SJohn Baldwin static void sched_priority(struct thread *td, u_char prio); 129b43179fbSJeff Roberson static void sched_setup(void *dummy); 130b43179fbSJeff Roberson static void maybe_resched(struct thread *td); 1318460a577SJohn Birrell static void updatepri(struct thread *td); 1328460a577SJohn Birrell static void resetpriority(struct thread *td); 1338460a577SJohn Birrell static void resetpriority_thread(struct thread *td); 13400b0483dSJulian Elischer #ifdef SMP 135f200843bSJohn Baldwin static int sched_pickcpu(struct thread *td); 13682a1dfc1SJulian Elischer static int forward_wakeup(int cpunum); 1378aa3d7ffSJohn Baldwin static void kick_other_cpu(int pri, int cpuid); 13800b0483dSJulian Elischer #endif 139b43179fbSJeff Roberson 140e17c57b1SJeff Roberson static struct kproc_desc sched_kp = { 141e17c57b1SJeff Roberson "schedcpu", 142e17c57b1SJeff Roberson schedcpu_thread, 143e17c57b1SJeff Roberson NULL 144e17c57b1SJeff Roberson }; 145785797c3SAndriy Gapon SYSINIT(schedcpu, SI_SUB_LAST, SI_ORDER_FIRST, kproc_start, 146237fdd78SRobert Watson &sched_kp); 147237fdd78SRobert Watson SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL); 148b43179fbSJeff Roberson 14948317e9eSAlexander Motin static void sched_initticks(void *dummy); 15048317e9eSAlexander Motin SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, 15148317e9eSAlexander Motin NULL); 15248317e9eSAlexander Motin 153b43179fbSJeff Roberson /* 154b43179fbSJeff Roberson * Global run queue. 155b43179fbSJeff Roberson */ 156b43179fbSJeff Roberson static struct runq runq; 157e17c57b1SJeff Roberson 158e17c57b1SJeff Roberson #ifdef SMP 159e17c57b1SJeff Roberson /* 160e17c57b1SJeff Roberson * Per-CPU run queues 161e17c57b1SJeff Roberson */ 162e17c57b1SJeff Roberson static struct runq runq_pcpu[MAXCPU]; 163f200843bSJohn Baldwin long runq_length[MAXCPU]; 1643121f534SAttilio Rao 16571a19bdcSAttilio Rao static cpuset_t idle_cpus_mask; 166e17c57b1SJeff Roberson #endif 167e17c57b1SJeff Roberson 168b722ad00SAlexander Motin struct pcpuidlestat { 169b722ad00SAlexander Motin u_int idlecalls; 170b722ad00SAlexander Motin u_int oldidlecalls; 171b722ad00SAlexander Motin }; 1723e288e62SDimitry Andric static DPCPU_DEFINE(struct pcpuidlestat, idlestat); 173b722ad00SAlexander Motin 174e17c57b1SJeff Roberson static void 175e17c57b1SJeff Roberson setup_runqs(void) 176e17c57b1SJeff Roberson { 177e17c57b1SJeff Roberson #ifdef SMP 178e17c57b1SJeff Roberson int i; 179e17c57b1SJeff Roberson 180e17c57b1SJeff Roberson for (i = 0; i < MAXCPU; ++i) 181e17c57b1SJeff Roberson runq_init(&runq_pcpu[i]); 182e17c57b1SJeff Roberson #endif 183e17c57b1SJeff Roberson 184e17c57b1SJeff Roberson runq_init(&runq); 185e17c57b1SJeff Roberson } 186b43179fbSJeff Roberson 187579895dfSAlexander Motin static int 188579895dfSAlexander Motin sysctl_kern_quantum(SYSCTL_HANDLER_ARGS) 189579895dfSAlexander Motin { 190579895dfSAlexander Motin int error, new_val, period; 191579895dfSAlexander Motin 192579895dfSAlexander Motin period = 1000000 / realstathz; 193579895dfSAlexander Motin new_val = period * sched_slice; 194579895dfSAlexander Motin error = sysctl_handle_int(oidp, &new_val, 0, req); 195579895dfSAlexander Motin if (error != 0 || req->newptr == NULL) 196579895dfSAlexander Motin return (error); 197579895dfSAlexander Motin if (new_val <= 0) 198579895dfSAlexander Motin return (EINVAL); 19937f4e025SAlexander Motin sched_slice = imax(1, (new_val + period / 2) / period); 20037f4e025SAlexander Motin hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / 20137f4e025SAlexander Motin realstathz); 202579895dfSAlexander Motin return (0); 203579895dfSAlexander Motin } 204579895dfSAlexander Motin 205e038d354SScott Long SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler"); 206dc095794SScott Long 207e038d354SScott Long SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0, 208e038d354SScott Long "Scheduler name"); 209579895dfSAlexander Motin SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW, 210579895dfSAlexander Motin NULL, 0, sysctl_kern_quantum, "I", 21137f4e025SAlexander Motin "Quantum for timeshare threads in microseconds"); 21248317e9eSAlexander Motin SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, 21337f4e025SAlexander Motin "Quantum for timeshare threads in stathz ticks"); 21437c28a02SJulian Elischer #ifdef SMP 21582a1dfc1SJulian Elischer /* Enable forwarding of wakeups to all other cpus */ 2166472ac3dSEd Schouten static SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, 2176472ac3dSEd Schouten "Kernel SMP"); 21882a1dfc1SJulian Elischer 219a90f3f25SJeff Roberson static int runq_fuzz = 1; 220a90f3f25SJeff Roberson SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, ""); 221a90f3f25SJeff Roberson 222bce73aedSJulian Elischer static int forward_wakeup_enabled = 1; 22382a1dfc1SJulian Elischer SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW, 22482a1dfc1SJulian Elischer &forward_wakeup_enabled, 0, 22582a1dfc1SJulian Elischer "Forwarding of wakeup to idle CPUs"); 22682a1dfc1SJulian Elischer 22782a1dfc1SJulian Elischer static int forward_wakeups_requested = 0; 22882a1dfc1SJulian Elischer SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD, 22982a1dfc1SJulian Elischer &forward_wakeups_requested, 0, 23082a1dfc1SJulian Elischer "Requests for Forwarding of wakeup to idle CPUs"); 23182a1dfc1SJulian Elischer 23282a1dfc1SJulian Elischer static int forward_wakeups_delivered = 0; 23382a1dfc1SJulian Elischer SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD, 23482a1dfc1SJulian Elischer &forward_wakeups_delivered, 0, 23582a1dfc1SJulian Elischer "Completed Forwarding of wakeup to idle CPUs"); 23682a1dfc1SJulian Elischer 237bce73aedSJulian Elischer static int forward_wakeup_use_mask = 1; 23882a1dfc1SJulian Elischer SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW, 23982a1dfc1SJulian Elischer &forward_wakeup_use_mask, 0, 24082a1dfc1SJulian Elischer "Use the mask of idle cpus"); 24182a1dfc1SJulian Elischer 24282a1dfc1SJulian Elischer static int forward_wakeup_use_loop = 0; 24382a1dfc1SJulian Elischer SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW, 24482a1dfc1SJulian Elischer &forward_wakeup_use_loop, 0, 24582a1dfc1SJulian Elischer "Use a loop to find idle cpus"); 24682a1dfc1SJulian Elischer 24737c28a02SJulian Elischer #endif 248ad1e7d28SJulian Elischer #if 0 2493389af30SJulian Elischer static int sched_followon = 0; 2503389af30SJulian Elischer SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW, 2513389af30SJulian Elischer &sched_followon, 0, 2523389af30SJulian Elischer "allow threads to share a quantum"); 2538460a577SJohn Birrell #endif 25482a1dfc1SJulian Elischer 255b3e9e682SRyan Stone SDT_PROVIDER_DEFINE(sched); 256b3e9e682SRyan Stone 257*d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *", 258b3e9e682SRyan Stone "struct proc *", "uint8_t"); 259*d9fae5abSAndriy Gapon SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *", 260b3e9e682SRyan Stone "struct proc *", "void *"); 261*d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *", 262b3e9e682SRyan Stone "struct proc *", "void *", "int"); 263*d9fae5abSAndriy Gapon SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *", 264b3e9e682SRyan Stone "struct proc *", "uint8_t", "struct thread *"); 265*d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int"); 266*d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *", 267b3e9e682SRyan Stone "struct proc *"); 268*d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , on__cpu); 269*d9fae5abSAndriy Gapon SDT_PROBE_DEFINE(sched, , , remain__cpu); 270*d9fae5abSAndriy Gapon SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *", 271b3e9e682SRyan Stone "struct proc *"); 272b3e9e682SRyan Stone 273907bdbc2SJeff Roberson static __inline void 274907bdbc2SJeff Roberson sched_load_add(void) 275907bdbc2SJeff Roberson { 2768f51ad55SJeff Roberson 277907bdbc2SJeff Roberson sched_tdcnt++; 2788f51ad55SJeff Roberson KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt); 279*d9fae5abSAndriy Gapon SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt); 280907bdbc2SJeff Roberson } 281907bdbc2SJeff Roberson 282907bdbc2SJeff Roberson static __inline void 283907bdbc2SJeff Roberson sched_load_rem(void) 284907bdbc2SJeff Roberson { 2858f51ad55SJeff Roberson 286907bdbc2SJeff Roberson sched_tdcnt--; 2878f51ad55SJeff Roberson KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt); 288*d9fae5abSAndriy Gapon SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt); 289907bdbc2SJeff Roberson } 290b43179fbSJeff Roberson /* 291b43179fbSJeff Roberson * Arrange to reschedule if necessary, taking the priorities and 292b43179fbSJeff Roberson * schedulers into account. 293b43179fbSJeff Roberson */ 294b43179fbSJeff Roberson static void 295b43179fbSJeff Roberson maybe_resched(struct thread *td) 296b43179fbSJeff Roberson { 297b43179fbSJeff Roberson 2987b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 299ed062c8dSJulian Elischer if (td->td_priority < curthread->td_priority) 3004a338afdSJulian Elischer curthread->td_flags |= TDF_NEEDRESCHED; 301b43179fbSJeff Roberson } 302b43179fbSJeff Roberson 303b43179fbSJeff Roberson /* 304a90f3f25SJeff Roberson * This function is called when a thread is about to be put on run queue 305a90f3f25SJeff Roberson * because it has been made runnable or its priority has been adjusted. It 306a90f3f25SJeff Roberson * determines if the new thread should be immediately preempted to. If so, 307a90f3f25SJeff Roberson * it switches to it and eventually returns true. If not, it returns false 308a90f3f25SJeff Roberson * so that the caller may place the thread on an appropriate run queue. 309a90f3f25SJeff Roberson */ 310a90f3f25SJeff Roberson int 311a90f3f25SJeff Roberson maybe_preempt(struct thread *td) 312a90f3f25SJeff Roberson { 313a90f3f25SJeff Roberson #ifdef PREEMPTION 314a90f3f25SJeff Roberson struct thread *ctd; 315a90f3f25SJeff Roberson int cpri, pri; 316a90f3f25SJeff Roberson 317a90f3f25SJeff Roberson /* 318a90f3f25SJeff Roberson * The new thread should not preempt the current thread if any of the 319a90f3f25SJeff Roberson * following conditions are true: 320a90f3f25SJeff Roberson * 321a90f3f25SJeff Roberson * - The kernel is in the throes of crashing (panicstr). 322a90f3f25SJeff Roberson * - The current thread has a higher (numerically lower) or 323a90f3f25SJeff Roberson * equivalent priority. Note that this prevents curthread from 324a90f3f25SJeff Roberson * trying to preempt to itself. 325a90f3f25SJeff Roberson * - It is too early in the boot for context switches (cold is set). 326a90f3f25SJeff Roberson * - The current thread has an inhibitor set or is in the process of 327a90f3f25SJeff Roberson * exiting. In this case, the current thread is about to switch 328a90f3f25SJeff Roberson * out anyways, so there's no point in preempting. If we did, 329a90f3f25SJeff Roberson * the current thread would not be properly resumed as well, so 330a90f3f25SJeff Roberson * just avoid that whole landmine. 331a90f3f25SJeff Roberson * - If the new thread's priority is not a realtime priority and 332a90f3f25SJeff Roberson * the current thread's priority is not an idle priority and 333a90f3f25SJeff Roberson * FULL_PREEMPTION is disabled. 334a90f3f25SJeff Roberson * 335a90f3f25SJeff Roberson * If all of these conditions are false, but the current thread is in 336a90f3f25SJeff Roberson * a nested critical section, then we have to defer the preemption 337a90f3f25SJeff Roberson * until we exit the critical section. Otherwise, switch immediately 338a90f3f25SJeff Roberson * to the new thread. 339a90f3f25SJeff Roberson */ 340a90f3f25SJeff Roberson ctd = curthread; 341a90f3f25SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 342a90f3f25SJeff Roberson KASSERT((td->td_inhibitors == 0), 343a90f3f25SJeff Roberson ("maybe_preempt: trying to run inhibited thread")); 344a90f3f25SJeff Roberson pri = td->td_priority; 345a90f3f25SJeff Roberson cpri = ctd->td_priority; 346a90f3f25SJeff Roberson if (panicstr != NULL || pri >= cpri || cold /* || dumping */ || 347a90f3f25SJeff Roberson TD_IS_INHIBITED(ctd)) 348a90f3f25SJeff Roberson return (0); 349a90f3f25SJeff Roberson #ifndef FULL_PREEMPTION 350a90f3f25SJeff Roberson if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE) 351a90f3f25SJeff Roberson return (0); 352a90f3f25SJeff Roberson #endif 353a90f3f25SJeff Roberson 354a90f3f25SJeff Roberson if (ctd->td_critnest > 1) { 355a90f3f25SJeff Roberson CTR1(KTR_PROC, "maybe_preempt: in critical section %d", 356a90f3f25SJeff Roberson ctd->td_critnest); 357a90f3f25SJeff Roberson ctd->td_owepreempt = 1; 358a90f3f25SJeff Roberson return (0); 359a90f3f25SJeff Roberson } 360a90f3f25SJeff Roberson /* 361a90f3f25SJeff Roberson * Thread is runnable but not yet put on system run queue. 362a90f3f25SJeff Roberson */ 363a90f3f25SJeff Roberson MPASS(ctd->td_lock == td->td_lock); 364a90f3f25SJeff Roberson MPASS(TD_ON_RUNQ(td)); 365a90f3f25SJeff Roberson TD_SET_RUNNING(td); 366a90f3f25SJeff Roberson CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td, 367a90f3f25SJeff Roberson td->td_proc->p_pid, td->td_name); 3688df78c41SJeff Roberson mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td); 369a90f3f25SJeff Roberson /* 370a90f3f25SJeff Roberson * td's lock pointer may have changed. We have to return with it 371a90f3f25SJeff Roberson * locked. 372a90f3f25SJeff Roberson */ 373a90f3f25SJeff Roberson spinlock_enter(); 374a90f3f25SJeff Roberson thread_unlock(ctd); 375a90f3f25SJeff Roberson thread_lock(td); 376a90f3f25SJeff Roberson spinlock_exit(); 377a90f3f25SJeff Roberson return (1); 378a90f3f25SJeff Roberson #else 379a90f3f25SJeff Roberson return (0); 380a90f3f25SJeff Roberson #endif 381a90f3f25SJeff Roberson } 382a90f3f25SJeff Roberson 383a90f3f25SJeff Roberson /* 384b43179fbSJeff Roberson * Constants for digital decay and forget: 3858460a577SJohn Birrell * 90% of (td_estcpu) usage in 5 * loadav time 386ad1e7d28SJulian Elischer * 95% of (ts_pctcpu) usage in 60 seconds (load insensitive) 387b43179fbSJeff Roberson * Note that, as ps(1) mentions, this can let percentages 388b43179fbSJeff Roberson * total over 100% (I've seen 137.9% for 3 processes). 389b43179fbSJeff Roberson * 3908460a577SJohn Birrell * Note that schedclock() updates td_estcpu and p_cpticks asynchronously. 391b43179fbSJeff Roberson * 3928460a577SJohn Birrell * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds. 393b43179fbSJeff Roberson * That is, the system wants to compute a value of decay such 394b43179fbSJeff Roberson * that the following for loop: 395b43179fbSJeff Roberson * for (i = 0; i < (5 * loadavg); i++) 3968460a577SJohn Birrell * td_estcpu *= decay; 397b43179fbSJeff Roberson * will compute 3988460a577SJohn Birrell * td_estcpu *= 0.1; 399b43179fbSJeff Roberson * for all values of loadavg: 400b43179fbSJeff Roberson * 401b43179fbSJeff Roberson * Mathematically this loop can be expressed by saying: 402b43179fbSJeff Roberson * decay ** (5 * loadavg) ~= .1 403b43179fbSJeff Roberson * 404b43179fbSJeff Roberson * The system computes decay as: 405b43179fbSJeff Roberson * decay = (2 * loadavg) / (2 * loadavg + 1) 406b43179fbSJeff Roberson * 407b43179fbSJeff Roberson * We wish to prove that the system's computation of decay 408b43179fbSJeff Roberson * will always fulfill the equation: 409b43179fbSJeff Roberson * decay ** (5 * loadavg) ~= .1 410b43179fbSJeff Roberson * 411b43179fbSJeff Roberson * If we compute b as: 412b43179fbSJeff Roberson * b = 2 * loadavg 413b43179fbSJeff Roberson * then 414b43179fbSJeff Roberson * decay = b / (b + 1) 415b43179fbSJeff Roberson * 416b43179fbSJeff Roberson * We now need to prove two things: 417b43179fbSJeff Roberson * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1) 418b43179fbSJeff Roberson * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg) 419b43179fbSJeff Roberson * 420b43179fbSJeff Roberson * Facts: 421b43179fbSJeff Roberson * For x close to zero, exp(x) =~ 1 + x, since 422b43179fbSJeff Roberson * exp(x) = 0! + x**1/1! + x**2/2! + ... . 423b43179fbSJeff Roberson * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b. 424b43179fbSJeff Roberson * For x close to zero, ln(1+x) =~ x, since 425b43179fbSJeff Roberson * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1 426b43179fbSJeff Roberson * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1). 427b43179fbSJeff Roberson * ln(.1) =~ -2.30 428b43179fbSJeff Roberson * 429b43179fbSJeff Roberson * Proof of (1): 430b43179fbSJeff Roberson * Solve (factor)**(power) =~ .1 given power (5*loadav): 431b43179fbSJeff Roberson * solving for factor, 432b43179fbSJeff Roberson * ln(factor) =~ (-2.30/5*loadav), or 433b43179fbSJeff Roberson * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) = 434b43179fbSJeff Roberson * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED 435b43179fbSJeff Roberson * 436b43179fbSJeff Roberson * Proof of (2): 437b43179fbSJeff Roberson * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)): 438b43179fbSJeff Roberson * solving for power, 439b43179fbSJeff Roberson * power*ln(b/(b+1)) =~ -2.30, or 440b43179fbSJeff Roberson * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED 441b43179fbSJeff Roberson * 442b43179fbSJeff Roberson * Actual power values for the implemented algorithm are as follows: 443b43179fbSJeff Roberson * loadav: 1 2 3 4 444b43179fbSJeff Roberson * power: 5.68 10.32 14.94 19.55 445b43179fbSJeff Roberson */ 446b43179fbSJeff Roberson 447b43179fbSJeff Roberson /* calculations for digital decay to forget 90% of usage in 5*loadav sec */ 448b43179fbSJeff Roberson #define loadfactor(loadav) (2 * (loadav)) 449b43179fbSJeff Roberson #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE)) 450b43179fbSJeff Roberson 451ad1e7d28SJulian Elischer /* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ 452b43179fbSJeff Roberson static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ 45352c0b557SMatthew D Fleming SYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 454b43179fbSJeff Roberson 455b43179fbSJeff Roberson /* 456b43179fbSJeff Roberson * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the 457b43179fbSJeff Roberson * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below 458b43179fbSJeff Roberson * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT). 459b43179fbSJeff Roberson * 460b43179fbSJeff Roberson * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used: 461b43179fbSJeff Roberson * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits). 462b43179fbSJeff Roberson * 463b43179fbSJeff Roberson * If you don't want to bother with the faster/more-accurate formula, you 464b43179fbSJeff Roberson * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate 465b43179fbSJeff Roberson * (more general) method of calculating the %age of CPU used by a process. 466b43179fbSJeff Roberson */ 467b43179fbSJeff Roberson #define CCPU_SHIFT 11 468b43179fbSJeff Roberson 469b43179fbSJeff Roberson /* 470b43179fbSJeff Roberson * Recompute process priorities, every hz ticks. 471b43179fbSJeff Roberson * MP-safe, called without the Giant mutex. 472b43179fbSJeff Roberson */ 473b43179fbSJeff Roberson /* ARGSUSED */ 474b43179fbSJeff Roberson static void 475c55bbb6cSJohn Baldwin schedcpu(void) 476b43179fbSJeff Roberson { 477b43179fbSJeff Roberson register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]); 478b43179fbSJeff Roberson struct thread *td; 479b43179fbSJeff Roberson struct proc *p; 480ad1e7d28SJulian Elischer struct td_sched *ts; 48148317e9eSAlexander Motin int awake; 482b43179fbSJeff Roberson 483b43179fbSJeff Roberson sx_slock(&allproc_lock); 484b43179fbSJeff Roberson FOREACH_PROC_IN_SYSTEM(p) { 485374ae2a3SJeff Roberson PROC_LOCK(p); 486e806d352SJohn Baldwin if (p->p_state == PRS_NEW) { 487e806d352SJohn Baldwin PROC_UNLOCK(p); 488e806d352SJohn Baldwin continue; 489e806d352SJohn Baldwin } 4908460a577SJohn Birrell FOREACH_THREAD_IN_PROC(p, td) { 491b43179fbSJeff Roberson awake = 0; 4927b20fb19SJeff Roberson thread_lock(td); 493ad1e7d28SJulian Elischer ts = td->td_sched; 494b43179fbSJeff Roberson /* 49570fca427SJohn Baldwin * Increment sleep time (if sleeping). We 49670fca427SJohn Baldwin * ignore overflow, as above. 497b43179fbSJeff Roberson */ 498b43179fbSJeff Roberson /* 499ad1e7d28SJulian Elischer * The td_sched slptimes are not touched in wakeup 500ad1e7d28SJulian Elischer * because the thread may not HAVE everything in 501ad1e7d28SJulian Elischer * memory? XXX I think this is out of date. 502b43179fbSJeff Roberson */ 503f0393f06SJeff Roberson if (TD_ON_RUNQ(td)) { 504b43179fbSJeff Roberson awake = 1; 5059727e637SJeff Roberson td->td_flags &= ~TDF_DIDRUN; 506f0393f06SJeff Roberson } else if (TD_IS_RUNNING(td)) { 507b43179fbSJeff Roberson awake = 1; 5089727e637SJeff Roberson /* Do not clear TDF_DIDRUN */ 5099727e637SJeff Roberson } else if (td->td_flags & TDF_DIDRUN) { 510b43179fbSJeff Roberson awake = 1; 5119727e637SJeff Roberson td->td_flags &= ~TDF_DIDRUN; 512b43179fbSJeff Roberson } 513b43179fbSJeff Roberson 514b43179fbSJeff Roberson /* 515ad1e7d28SJulian Elischer * ts_pctcpu is only for ps and ttyinfo(). 516b43179fbSJeff Roberson */ 517ad1e7d28SJulian Elischer ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT; 518b43179fbSJeff Roberson /* 519ad1e7d28SJulian Elischer * If the td_sched has been idle the entire second, 520b43179fbSJeff Roberson * stop recalculating its priority until 521b43179fbSJeff Roberson * it wakes up. 522b43179fbSJeff Roberson */ 523ad1e7d28SJulian Elischer if (ts->ts_cpticks != 0) { 524b43179fbSJeff Roberson #if (FSHIFT >= CCPU_SHIFT) 525ad1e7d28SJulian Elischer ts->ts_pctcpu += (realstathz == 100) 526ad1e7d28SJulian Elischer ? ((fixpt_t) ts->ts_cpticks) << 527b43179fbSJeff Roberson (FSHIFT - CCPU_SHIFT) : 528ad1e7d28SJulian Elischer 100 * (((fixpt_t) ts->ts_cpticks) 529bcb06d59SJeff Roberson << (FSHIFT - CCPU_SHIFT)) / realstathz; 530b43179fbSJeff Roberson #else 531ad1e7d28SJulian Elischer ts->ts_pctcpu += ((FSCALE - ccpu) * 532ad1e7d28SJulian Elischer (ts->ts_cpticks * 533bcb06d59SJeff Roberson FSCALE / realstathz)) >> FSHIFT; 534b43179fbSJeff Roberson #endif 535ad1e7d28SJulian Elischer ts->ts_cpticks = 0; 5368460a577SJohn Birrell } 5378460a577SJohn Birrell /* 5388460a577SJohn Birrell * If there are ANY running threads in this process, 539b43179fbSJeff Roberson * then don't count it as sleeping. 5408aa3d7ffSJohn Baldwin * XXX: this is broken. 541b43179fbSJeff Roberson */ 542b43179fbSJeff Roberson if (awake) { 54354b0e65fSJeff Roberson if (ts->ts_slptime > 1) { 544b43179fbSJeff Roberson /* 545b43179fbSJeff Roberson * In an ideal world, this should not 546b43179fbSJeff Roberson * happen, because whoever woke us 547b43179fbSJeff Roberson * up from the long sleep should have 548b43179fbSJeff Roberson * unwound the slptime and reset our 549b43179fbSJeff Roberson * priority before we run at the stale 550b43179fbSJeff Roberson * priority. Should KASSERT at some 551b43179fbSJeff Roberson * point when all the cases are fixed. 552b43179fbSJeff Roberson */ 5538460a577SJohn Birrell updatepri(td); 5548460a577SJohn Birrell } 55554b0e65fSJeff Roberson ts->ts_slptime = 0; 5568460a577SJohn Birrell } else 55754b0e65fSJeff Roberson ts->ts_slptime++; 55854b0e65fSJeff Roberson if (ts->ts_slptime > 1) { 5597b20fb19SJeff Roberson thread_unlock(td); 5608460a577SJohn Birrell continue; 5617b20fb19SJeff Roberson } 5628460a577SJohn Birrell td->td_estcpu = decay_cpu(loadfac, td->td_estcpu); 5638460a577SJohn Birrell resetpriority(td); 5648460a577SJohn Birrell resetpriority_thread(td); 5657b20fb19SJeff Roberson thread_unlock(td); 5668aa3d7ffSJohn Baldwin } 567374ae2a3SJeff Roberson PROC_UNLOCK(p); 5688aa3d7ffSJohn Baldwin } 569b43179fbSJeff Roberson sx_sunlock(&allproc_lock); 570c55bbb6cSJohn Baldwin } 571c55bbb6cSJohn Baldwin 572c55bbb6cSJohn Baldwin /* 573c55bbb6cSJohn Baldwin * Main loop for a kthread that executes schedcpu once a second. 574c55bbb6cSJohn Baldwin */ 575c55bbb6cSJohn Baldwin static void 576e17c57b1SJeff Roberson schedcpu_thread(void) 577c55bbb6cSJohn Baldwin { 578c55bbb6cSJohn Baldwin 579c55bbb6cSJohn Baldwin for (;;) { 580c55bbb6cSJohn Baldwin schedcpu(); 5814d70511aSJohn Baldwin pause("-", hz); 582c55bbb6cSJohn Baldwin } 583b43179fbSJeff Roberson } 584b43179fbSJeff Roberson 585b43179fbSJeff Roberson /* 586b43179fbSJeff Roberson * Recalculate the priority of a process after it has slept for a while. 5878460a577SJohn Birrell * For all load averages >= 1 and max td_estcpu of 255, sleeping for at 5888460a577SJohn Birrell * least six times the loadfactor will decay td_estcpu to zero. 589b43179fbSJeff Roberson */ 590b43179fbSJeff Roberson static void 5918460a577SJohn Birrell updatepri(struct thread *td) 592b43179fbSJeff Roberson { 59354b0e65fSJeff Roberson struct td_sched *ts; 59454b0e65fSJeff Roberson fixpt_t loadfac; 59554b0e65fSJeff Roberson unsigned int newcpu; 596b43179fbSJeff Roberson 59754b0e65fSJeff Roberson ts = td->td_sched; 59870fca427SJohn Baldwin loadfac = loadfactor(averunnable.ldavg[0]); 59954b0e65fSJeff Roberson if (ts->ts_slptime > 5 * loadfac) 6008460a577SJohn Birrell td->td_estcpu = 0; 601b43179fbSJeff Roberson else { 6028460a577SJohn Birrell newcpu = td->td_estcpu; 60354b0e65fSJeff Roberson ts->ts_slptime--; /* was incremented in schedcpu() */ 60454b0e65fSJeff Roberson while (newcpu && --ts->ts_slptime) 605b43179fbSJeff Roberson newcpu = decay_cpu(loadfac, newcpu); 6068460a577SJohn Birrell td->td_estcpu = newcpu; 607b43179fbSJeff Roberson } 608b43179fbSJeff Roberson } 609b43179fbSJeff Roberson 610b43179fbSJeff Roberson /* 611b43179fbSJeff Roberson * Compute the priority of a process when running in user mode. 612b43179fbSJeff Roberson * Arrange to reschedule if the resulting priority is better 613b43179fbSJeff Roberson * than that of the current process. 614b43179fbSJeff Roberson */ 615b43179fbSJeff Roberson static void 6168460a577SJohn Birrell resetpriority(struct thread *td) 617b43179fbSJeff Roberson { 618b43179fbSJeff Roberson register unsigned int newpriority; 619b43179fbSJeff Roberson 6208460a577SJohn Birrell if (td->td_pri_class == PRI_TIMESHARE) { 6218460a577SJohn Birrell newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT + 6228460a577SJohn Birrell NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN); 623b43179fbSJeff Roberson newpriority = min(max(newpriority, PRI_MIN_TIMESHARE), 624b43179fbSJeff Roberson PRI_MAX_TIMESHARE); 6258460a577SJohn Birrell sched_user_prio(td, newpriority); 626b43179fbSJeff Roberson } 627b43179fbSJeff Roberson } 628f5c157d9SJohn Baldwin 629f5c157d9SJohn Baldwin /* 630ad1e7d28SJulian Elischer * Update the thread's priority when the associated process's user 631f5c157d9SJohn Baldwin * priority changes. 632f5c157d9SJohn Baldwin */ 633f5c157d9SJohn Baldwin static void 6348460a577SJohn Birrell resetpriority_thread(struct thread *td) 635f5c157d9SJohn Baldwin { 636f5c157d9SJohn Baldwin 637f5c157d9SJohn Baldwin /* Only change threads with a time sharing user priority. */ 638f5c157d9SJohn Baldwin if (td->td_priority < PRI_MIN_TIMESHARE || 639f5c157d9SJohn Baldwin td->td_priority > PRI_MAX_TIMESHARE) 640f5c157d9SJohn Baldwin return; 641f5c157d9SJohn Baldwin 642f5c157d9SJohn Baldwin /* XXX the whole needresched thing is broken, but not silly. */ 643f5c157d9SJohn Baldwin maybe_resched(td); 644f5c157d9SJohn Baldwin 6458460a577SJohn Birrell sched_prio(td, td->td_user_pri); 646b43179fbSJeff Roberson } 647b43179fbSJeff Roberson 648b43179fbSJeff Roberson /* ARGSUSED */ 649b43179fbSJeff Roberson static void 650b43179fbSJeff Roberson sched_setup(void *dummy) 651b43179fbSJeff Roberson { 65270fca427SJohn Baldwin 653579895dfSAlexander Motin setup_runqs(); 654b43179fbSJeff Roberson 655ca59f152SJeff Roberson /* Account for thread0. */ 656907bdbc2SJeff Roberson sched_load_add(); 657b43179fbSJeff Roberson } 658b43179fbSJeff Roberson 65948317e9eSAlexander Motin /* 660579895dfSAlexander Motin * This routine determines time constants after stathz and hz are setup. 66148317e9eSAlexander Motin */ 66248317e9eSAlexander Motin static void 66348317e9eSAlexander Motin sched_initticks(void *dummy) 66448317e9eSAlexander Motin { 66548317e9eSAlexander Motin 66648317e9eSAlexander Motin realstathz = stathz ? stathz : hz; 66748317e9eSAlexander Motin sched_slice = realstathz / 10; /* ~100ms */ 66837f4e025SAlexander Motin hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) / 66937f4e025SAlexander Motin realstathz); 67048317e9eSAlexander Motin } 67148317e9eSAlexander Motin 672b43179fbSJeff Roberson /* External interfaces start here */ 6738aa3d7ffSJohn Baldwin 674ed062c8dSJulian Elischer /* 675ed062c8dSJulian Elischer * Very early in the boot some setup of scheduler-specific 676f3050486SMaxim Konovalov * parts of proc0 and of some scheduler resources needs to be done. 677ed062c8dSJulian Elischer * Called from: 678ed062c8dSJulian Elischer * proc0_init() 679ed062c8dSJulian Elischer */ 680ed062c8dSJulian Elischer void 681ed062c8dSJulian Elischer schedinit(void) 682ed062c8dSJulian Elischer { 683ed062c8dSJulian Elischer /* 684ed062c8dSJulian Elischer * Set up the scheduler specific parts of proc0. 685ed062c8dSJulian Elischer */ 686ed062c8dSJulian Elischer proc0.p_sched = NULL; /* XXX */ 687ad1e7d28SJulian Elischer thread0.td_sched = &td_sched0; 6887b20fb19SJeff Roberson thread0.td_lock = &sched_lock; 68948317e9eSAlexander Motin td_sched0.ts_slice = sched_slice; 6906ea38de8SJeff Roberson mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE); 691ed062c8dSJulian Elischer } 692ed062c8dSJulian Elischer 693b43179fbSJeff Roberson int 694b43179fbSJeff Roberson sched_runnable(void) 695b43179fbSJeff Roberson { 696e17c57b1SJeff Roberson #ifdef SMP 697e17c57b1SJeff Roberson return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]); 698e17c57b1SJeff Roberson #else 699b43179fbSJeff Roberson return runq_check(&runq); 700e17c57b1SJeff Roberson #endif 701b43179fbSJeff Roberson } 702b43179fbSJeff Roberson 703b43179fbSJeff Roberson int 704b43179fbSJeff Roberson sched_rr_interval(void) 705b43179fbSJeff Roberson { 70648317e9eSAlexander Motin 70748317e9eSAlexander Motin /* Convert sched_slice from stathz to hz. */ 70837f4e025SAlexander Motin return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz)); 709b43179fbSJeff Roberson } 710b43179fbSJeff Roberson 711b43179fbSJeff Roberson /* 712b43179fbSJeff Roberson * We adjust the priority of the current process. The priority of 713b43179fbSJeff Roberson * a process gets worse as it accumulates CPU time. The cpu usage 7148460a577SJohn Birrell * estimator (td_estcpu) is increased here. resetpriority() will 7158460a577SJohn Birrell * compute a different priority each time td_estcpu increases by 716b43179fbSJeff Roberson * INVERSE_ESTCPU_WEIGHT 717b43179fbSJeff Roberson * (until MAXPRI is reached). The cpu usage estimator ramps up 718b43179fbSJeff Roberson * quite quickly when the process is running (linearly), and decays 719b43179fbSJeff Roberson * away exponentially, at a rate which is proportionally slower when 720b43179fbSJeff Roberson * the system is busy. The basic principle is that the system will 721b43179fbSJeff Roberson * 90% forget that the process used a lot of CPU time in 5 * loadav 722b43179fbSJeff Roberson * seconds. This causes the system to favor processes which haven't 723b43179fbSJeff Roberson * run much recently, and to round-robin among other processes. 724b43179fbSJeff Roberson */ 725b43179fbSJeff Roberson void 7267cf90fb3SJeff Roberson sched_clock(struct thread *td) 727b43179fbSJeff Roberson { 728b722ad00SAlexander Motin struct pcpuidlestat *stat; 729ad1e7d28SJulian Elischer struct td_sched *ts; 730b43179fbSJeff Roberson 7317b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 732ad1e7d28SJulian Elischer ts = td->td_sched; 733f7f9e7f3SJeff Roberson 734ad1e7d28SJulian Elischer ts->ts_cpticks++; 7358460a577SJohn Birrell td->td_estcpu = ESTCPULIM(td->td_estcpu + 1); 7368460a577SJohn Birrell if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) { 7378460a577SJohn Birrell resetpriority(td); 7388460a577SJohn Birrell resetpriority_thread(td); 739b43179fbSJeff Roberson } 7409dddab6fSJohn Baldwin 7419dddab6fSJohn Baldwin /* 7429dddab6fSJohn Baldwin * Force a context switch if the current thread has used up a full 743579895dfSAlexander Motin * time slice (default is 100ms). 7449dddab6fSJohn Baldwin */ 745579895dfSAlexander Motin if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) { 74648317e9eSAlexander Motin ts->ts_slice = sched_slice; 7473d7f4117SAlexander Motin td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND; 74848317e9eSAlexander Motin } 749b722ad00SAlexander Motin 750b722ad00SAlexander Motin stat = DPCPU_PTR(idlestat); 751b722ad00SAlexander Motin stat->oldidlecalls = stat->idlecalls; 752b722ad00SAlexander Motin stat->idlecalls = 0; 753b43179fbSJeff Roberson } 75470fca427SJohn Baldwin 7558460a577SJohn Birrell /* 7568aa3d7ffSJohn Baldwin * Charge child's scheduling CPU usage to parent. 7578460a577SJohn Birrell */ 758b43179fbSJeff Roberson void 75955d44f79SJulian Elischer sched_exit(struct proc *p, struct thread *td) 760f7f9e7f3SJeff Roberson { 7618460a577SJohn Birrell 7628f51ad55SJeff Roberson KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit", 763cd39bb09SXin LI "prio:%d", td->td_priority); 7648f51ad55SJeff Roberson 765374ae2a3SJeff Roberson PROC_LOCK_ASSERT(p, MA_OWNED); 766ad1e7d28SJulian Elischer sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); 767b43179fbSJeff Roberson } 768b43179fbSJeff Roberson 769b43179fbSJeff Roberson void 770f7f9e7f3SJeff Roberson sched_exit_thread(struct thread *td, struct thread *child) 771b43179fbSJeff Roberson { 772ad1e7d28SJulian Elischer 7738f51ad55SJeff Roberson KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit", 774cd39bb09SXin LI "prio:%d", child->td_priority); 7757b20fb19SJeff Roberson thread_lock(td); 776ad1e7d28SJulian Elischer td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu); 7777b20fb19SJeff Roberson thread_unlock(td); 7781b9d701fSAttilio Rao thread_lock(child); 7791b9d701fSAttilio Rao if ((child->td_flags & TDF_NOLOAD) == 0) 780907bdbc2SJeff Roberson sched_load_rem(); 7811b9d701fSAttilio Rao thread_unlock(child); 782f7f9e7f3SJeff Roberson } 783bcb06d59SJeff Roberson 784f7f9e7f3SJeff Roberson void 785ed062c8dSJulian Elischer sched_fork(struct thread *td, struct thread *childtd) 786f7f9e7f3SJeff Roberson { 787ed062c8dSJulian Elischer sched_fork_thread(td, childtd); 788f7f9e7f3SJeff Roberson } 789bcb06d59SJeff Roberson 790f7f9e7f3SJeff Roberson void 791ed062c8dSJulian Elischer sched_fork_thread(struct thread *td, struct thread *childtd) 792f7f9e7f3SJeff Roberson { 7938b16c208SJeff Roberson struct td_sched *ts; 7948b16c208SJeff Roberson 795ad1e7d28SJulian Elischer childtd->td_estcpu = td->td_estcpu; 7967b20fb19SJeff Roberson childtd->td_lock = &sched_lock; 797f5a3ef99SMarcel Moolenaar childtd->td_cpuset = cpuset_ref(td->td_cpuset); 79822d19207SJohn Baldwin childtd->td_priority = childtd->td_base_pri; 7998b16c208SJeff Roberson ts = childtd->td_sched; 8008b16c208SJeff Roberson bzero(ts, sizeof(*ts)); 801f200843bSJohn Baldwin ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY); 80248317e9eSAlexander Motin ts->ts_slice = 1; 803b43179fbSJeff Roberson } 804b43179fbSJeff Roberson 805b43179fbSJeff Roberson void 806fa885116SJulian Elischer sched_nice(struct proc *p, int nice) 807b43179fbSJeff Roberson { 808f5c157d9SJohn Baldwin struct thread *td; 8090b5318c8SJohn Baldwin 810fa885116SJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 811fa885116SJulian Elischer p->p_nice = nice; 8128460a577SJohn Birrell FOREACH_THREAD_IN_PROC(p, td) { 8137b20fb19SJeff Roberson thread_lock(td); 8148460a577SJohn Birrell resetpriority(td); 8158460a577SJohn Birrell resetpriority_thread(td); 8167b20fb19SJeff Roberson thread_unlock(td); 8178460a577SJohn Birrell } 818fa885116SJulian Elischer } 819b43179fbSJeff Roberson 820f7f9e7f3SJeff Roberson void 8218460a577SJohn Birrell sched_class(struct thread *td, int class) 822f7f9e7f3SJeff Roberson { 8237b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 8248460a577SJohn Birrell td->td_pri_class = class; 825f7f9e7f3SJeff Roberson } 826f7f9e7f3SJeff Roberson 8278460a577SJohn Birrell /* 8288460a577SJohn Birrell * Adjust the priority of a thread. 8298460a577SJohn Birrell */ 830f5c157d9SJohn Baldwin static void 831f5c157d9SJohn Baldwin sched_priority(struct thread *td, u_char prio) 832b43179fbSJeff Roberson { 833b43179fbSJeff Roberson 8348f51ad55SJeff Roberson 8358f51ad55SJeff Roberson KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change", 8368f51ad55SJeff Roberson "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED, 8378f51ad55SJeff Roberson sched_tdname(curthread)); 838*d9fae5abSAndriy Gapon SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio); 8398f51ad55SJeff Roberson if (td != curthread && prio > td->td_priority) { 8408f51ad55SJeff Roberson KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread), 8418f51ad55SJeff Roberson "lend prio", "prio:%d", td->td_priority, "new prio:%d", 8428f51ad55SJeff Roberson prio, KTR_ATTR_LINKED, sched_tdname(td)); 843*d9fae5abSAndriy Gapon SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio, 844b3e9e682SRyan Stone curthread); 8458f51ad55SJeff Roberson } 8467b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 847f5c157d9SJohn Baldwin if (td->td_priority == prio) 848f5c157d9SJohn Baldwin return; 8491f955e2dSJulian Elischer td->td_priority = prio; 8509727e637SJeff Roberson if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) { 851f0393f06SJeff Roberson sched_rem(td); 852f0393f06SJeff Roberson sched_add(td, SRQ_BORING); 853b43179fbSJeff Roberson } 854b43179fbSJeff Roberson } 855b43179fbSJeff Roberson 856f5c157d9SJohn Baldwin /* 857f5c157d9SJohn Baldwin * Update a thread's priority when it is lent another thread's 858f5c157d9SJohn Baldwin * priority. 859f5c157d9SJohn Baldwin */ 860f5c157d9SJohn Baldwin void 861f5c157d9SJohn Baldwin sched_lend_prio(struct thread *td, u_char prio) 862f5c157d9SJohn Baldwin { 863f5c157d9SJohn Baldwin 864f5c157d9SJohn Baldwin td->td_flags |= TDF_BORROWING; 865f5c157d9SJohn Baldwin sched_priority(td, prio); 866f5c157d9SJohn Baldwin } 867f5c157d9SJohn Baldwin 868f5c157d9SJohn Baldwin /* 869f5c157d9SJohn Baldwin * Restore a thread's priority when priority propagation is 870f5c157d9SJohn Baldwin * over. The prio argument is the minimum priority the thread 871f5c157d9SJohn Baldwin * needs to have to satisfy other possible priority lending 872f5c157d9SJohn Baldwin * requests. If the thread's regulary priority is less 873f5c157d9SJohn Baldwin * important than prio the thread will keep a priority boost 874f5c157d9SJohn Baldwin * of prio. 875f5c157d9SJohn Baldwin */ 876f5c157d9SJohn Baldwin void 877f5c157d9SJohn Baldwin sched_unlend_prio(struct thread *td, u_char prio) 878f5c157d9SJohn Baldwin { 879f5c157d9SJohn Baldwin u_char base_pri; 880f5c157d9SJohn Baldwin 881f5c157d9SJohn Baldwin if (td->td_base_pri >= PRI_MIN_TIMESHARE && 882f5c157d9SJohn Baldwin td->td_base_pri <= PRI_MAX_TIMESHARE) 8838460a577SJohn Birrell base_pri = td->td_user_pri; 884f5c157d9SJohn Baldwin else 885f5c157d9SJohn Baldwin base_pri = td->td_base_pri; 886f5c157d9SJohn Baldwin if (prio >= base_pri) { 887f5c157d9SJohn Baldwin td->td_flags &= ~TDF_BORROWING; 888f5c157d9SJohn Baldwin sched_prio(td, base_pri); 889f5c157d9SJohn Baldwin } else 890f5c157d9SJohn Baldwin sched_lend_prio(td, prio); 891f5c157d9SJohn Baldwin } 892f5c157d9SJohn Baldwin 893f5c157d9SJohn Baldwin void 894f5c157d9SJohn Baldwin sched_prio(struct thread *td, u_char prio) 895f5c157d9SJohn Baldwin { 896f5c157d9SJohn Baldwin u_char oldprio; 897f5c157d9SJohn Baldwin 898f5c157d9SJohn Baldwin /* First, update the base priority. */ 899f5c157d9SJohn Baldwin td->td_base_pri = prio; 900f5c157d9SJohn Baldwin 901f5c157d9SJohn Baldwin /* 902f5c157d9SJohn Baldwin * If the thread is borrowing another thread's priority, don't ever 903f5c157d9SJohn Baldwin * lower the priority. 904f5c157d9SJohn Baldwin */ 905f5c157d9SJohn Baldwin if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 906f5c157d9SJohn Baldwin return; 907f5c157d9SJohn Baldwin 908f5c157d9SJohn Baldwin /* Change the real priority. */ 909f5c157d9SJohn Baldwin oldprio = td->td_priority; 910f5c157d9SJohn Baldwin sched_priority(td, prio); 911f5c157d9SJohn Baldwin 912f5c157d9SJohn Baldwin /* 913f5c157d9SJohn Baldwin * If the thread is on a turnstile, then let the turnstile update 914f5c157d9SJohn Baldwin * its state. 915f5c157d9SJohn Baldwin */ 916f5c157d9SJohn Baldwin if (TD_ON_LOCK(td) && oldprio != prio) 917f5c157d9SJohn Baldwin turnstile_adjust(td, oldprio); 918f5c157d9SJohn Baldwin } 919f5c157d9SJohn Baldwin 920b43179fbSJeff Roberson void 9218460a577SJohn Birrell sched_user_prio(struct thread *td, u_char prio) 9223db720fdSDavid Xu { 9233db720fdSDavid Xu 924435806d3SDavid Xu THREAD_LOCK_ASSERT(td, MA_OWNED); 9258460a577SJohn Birrell td->td_base_user_pri = prio; 926acbe332aSDavid Xu if (td->td_lend_user_pri <= prio) 9275a215147SDavid Xu return; 9288460a577SJohn Birrell td->td_user_pri = prio; 9293db720fdSDavid Xu } 9303db720fdSDavid Xu 9313db720fdSDavid Xu void 9323db720fdSDavid Xu sched_lend_user_prio(struct thread *td, u_char prio) 9333db720fdSDavid Xu { 9343db720fdSDavid Xu 935435806d3SDavid Xu THREAD_LOCK_ASSERT(td, MA_OWNED); 936acbe332aSDavid Xu td->td_lend_user_pri = prio; 937c8e368a9SDavid Xu td->td_user_pri = min(prio, td->td_base_user_pri); 938c8e368a9SDavid Xu if (td->td_priority > td->td_user_pri) 939c8e368a9SDavid Xu sched_prio(td, td->td_user_pri); 940c8e368a9SDavid Xu else if (td->td_priority != td->td_user_pri) 941c8e368a9SDavid Xu td->td_flags |= TDF_NEEDRESCHED; 942435806d3SDavid Xu } 9433db720fdSDavid Xu 9443db720fdSDavid Xu void 945c5aa6b58SJeff Roberson sched_sleep(struct thread *td, int pri) 946b43179fbSJeff Roberson { 9472056d0a1SJohn Baldwin 9487b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 94954b0e65fSJeff Roberson td->td_slptick = ticks; 95054b0e65fSJeff Roberson td->td_sched->ts_slptime = 0; 9512dc29adbSJohn Baldwin if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 952c5aa6b58SJeff Roberson sched_prio(td, pri); 95317c4c356SKonstantin Belousov if (TD_IS_SUSPENDED(td) || pri >= PSOCK) 954c5aa6b58SJeff Roberson td->td_flags |= TDF_CANSWAP; 955b43179fbSJeff Roberson } 956b43179fbSJeff Roberson 957b43179fbSJeff Roberson void 9583389af30SJulian Elischer sched_switch(struct thread *td, struct thread *newtd, int flags) 959b43179fbSJeff Roberson { 960b0b9dee5SAttilio Rao struct mtx *tmtx; 961ad1e7d28SJulian Elischer struct td_sched *ts; 962b43179fbSJeff Roberson struct proc *p; 9633d7f4117SAlexander Motin int preempted; 964b43179fbSJeff Roberson 965b0b9dee5SAttilio Rao tmtx = NULL; 966ad1e7d28SJulian Elischer ts = td->td_sched; 967b43179fbSJeff Roberson p = td->td_proc; 968b43179fbSJeff Roberson 9697b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 9708aa3d7ffSJohn Baldwin 9717b20fb19SJeff Roberson /* 9727b20fb19SJeff Roberson * Switch to the sched lock to fix things up and pick 9737b20fb19SJeff Roberson * a new thread. 974b0b9dee5SAttilio Rao * Block the td_lock in order to avoid breaking the critical path. 9757b20fb19SJeff Roberson */ 9767b20fb19SJeff Roberson if (td->td_lock != &sched_lock) { 9777b20fb19SJeff Roberson mtx_lock_spin(&sched_lock); 978b0b9dee5SAttilio Rao tmtx = thread_lock_block(td); 9797b20fb19SJeff Roberson } 980b43179fbSJeff Roberson 9811b9d701fSAttilio Rao if ((td->td_flags & TDF_NOLOAD) == 0) 982907bdbc2SJeff Roberson sched_load_rem(); 9833389af30SJulian Elischer 984060563ecSJulian Elischer td->td_lastcpu = td->td_oncpu; 9853d7f4117SAlexander Motin preempted = !(td->td_flags & TDF_SLICEEND); 9863d7f4117SAlexander Motin td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND); 98777918643SStephan Uphoff td->td_owepreempt = 0; 988ca59f152SJeff Roberson td->td_oncpu = NOCPU; 9898aa3d7ffSJohn Baldwin 990b43179fbSJeff Roberson /* 991b43179fbSJeff Roberson * At the last moment, if this thread is still marked RUNNING, 992b43179fbSJeff Roberson * then put it back on the run queue as it has not been suspended 993bf0acc27SJohn Baldwin * or stopped or any thing else similar. We never put the idle 994bf0acc27SJohn Baldwin * threads on the run queue, however. 995b43179fbSJeff Roberson */ 996c6226eeaSJulian Elischer if (td->td_flags & TDF_IDLETD) { 997bf0acc27SJohn Baldwin TD_SET_CAN_RUN(td); 998c6226eeaSJulian Elischer #ifdef SMP 999a38f1f26SAttilio Rao CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask); 1000c6226eeaSJulian Elischer #endif 1001c6226eeaSJulian Elischer } else { 1002ed062c8dSJulian Elischer if (TD_IS_RUNNING(td)) { 1003ad1e7d28SJulian Elischer /* Put us back on the run queue. */ 10043d7f4117SAlexander Motin sched_add(td, preempted ? 1005c20c691bSJulian Elischer SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1006c20c691bSJulian Elischer SRQ_OURSELF|SRQ_YIELDING); 1007ed062c8dSJulian Elischer } 1008b43179fbSJeff Roberson } 1009c20c691bSJulian Elischer if (newtd) { 1010c20c691bSJulian Elischer /* 1011c20c691bSJulian Elischer * The thread we are about to run needs to be counted 1012c20c691bSJulian Elischer * as if it had been added to the run queue and selected. 1013c20c691bSJulian Elischer * It came from: 1014c20c691bSJulian Elischer * * A preemption 1015c20c691bSJulian Elischer * * An upcall 1016c20c691bSJulian Elischer * * A followon 1017c20c691bSJulian Elischer */ 1018c20c691bSJulian Elischer KASSERT((newtd->td_inhibitors == 0), 10192da78e38SRobert Watson ("trying to run inhibited thread")); 10209727e637SJeff Roberson newtd->td_flags |= TDF_DIDRUN; 1021c20c691bSJulian Elischer TD_SET_RUNNING(newtd); 10221b9d701fSAttilio Rao if ((newtd->td_flags & TDF_NOLOAD) == 0) 1023907bdbc2SJeff Roberson sched_load_add(); 1024c20c691bSJulian Elischer } else { 1025ae53b483SJeff Roberson newtd = choosethread(); 10267b20fb19SJeff Roberson MPASS(newtd->td_lock == &sched_lock); 102758060789SAttilio Rao } 1028c20c691bSJulian Elischer 1029ebccf1e3SJoseph Koshy if (td != newtd) { 1030ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1031ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1032ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1033ebccf1e3SJoseph Koshy #endif 1034b3e9e682SRyan Stone 1035*d9fae5abSAndriy Gapon SDT_PROBE2(sched, , , off__cpu, td, td->td_proc); 1036b3e9e682SRyan Stone 1037c6226eeaSJulian Elischer /* I feel sleepy */ 1038eea4f254SJeff Roberson lock_profile_release_lock(&sched_lock.lock_object); 10396f5f25e5SJohn Birrell #ifdef KDTRACE_HOOKS 10406f5f25e5SJohn Birrell /* 10416f5f25e5SJohn Birrell * If DTrace has set the active vtime enum to anything 10426f5f25e5SJohn Birrell * other than INACTIVE (0), then it should have set the 10436f5f25e5SJohn Birrell * function to call. 10446f5f25e5SJohn Birrell */ 10456f5f25e5SJohn Birrell if (dtrace_vtime_active) 10466f5f25e5SJohn Birrell (*dtrace_vtime_switch_func)(newtd); 10476f5f25e5SJohn Birrell #endif 10486f5f25e5SJohn Birrell 1049b0b9dee5SAttilio Rao cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock); 1050eea4f254SJeff Roberson lock_profile_obtain_lock_success(&sched_lock.lock_object, 1051eea4f254SJeff Roberson 0, 0, __FILE__, __LINE__); 1052c6226eeaSJulian Elischer /* 1053c6226eeaSJulian Elischer * Where am I? What year is it? 1054c6226eeaSJulian Elischer * We are in the same thread that went to sleep above, 10558aa3d7ffSJohn Baldwin * but any amount of time may have passed. All our context 1056c6226eeaSJulian Elischer * will still be available as will local variables. 1057c6226eeaSJulian Elischer * PCPU values however may have changed as we may have 1058c6226eeaSJulian Elischer * changed CPU so don't trust cached values of them. 1059c6226eeaSJulian Elischer * New threads will go to fork_exit() instead of here 1060c6226eeaSJulian Elischer * so if you change things here you may need to change 1061c6226eeaSJulian Elischer * things there too. 10628aa3d7ffSJohn Baldwin * 1063c6226eeaSJulian Elischer * If the thread above was exiting it will never wake 1064c6226eeaSJulian Elischer * up again here, so either it has saved everything it 1065c6226eeaSJulian Elischer * needed to, or the thread_wait() or wait() will 1066c6226eeaSJulian Elischer * need to reap it. 1067c6226eeaSJulian Elischer */ 1068b3e9e682SRyan Stone 1069*d9fae5abSAndriy Gapon SDT_PROBE0(sched, , , on__cpu); 1070ebccf1e3SJoseph Koshy #ifdef HWPMC_HOOKS 1071ebccf1e3SJoseph Koshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1072ebccf1e3SJoseph Koshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1073ebccf1e3SJoseph Koshy #endif 1074b3e9e682SRyan Stone } else 1075*d9fae5abSAndriy Gapon SDT_PROBE0(sched, , , remain__cpu); 1076ebccf1e3SJoseph Koshy 1077c6226eeaSJulian Elischer #ifdef SMP 1078c6226eeaSJulian Elischer if (td->td_flags & TDF_IDLETD) 1079a38f1f26SAttilio Rao CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask); 1080c6226eeaSJulian Elischer #endif 1081ae53b483SJeff Roberson sched_lock.mtx_lock = (uintptr_t)td; 1082ae53b483SJeff Roberson td->td_oncpu = PCPU_GET(cpuid); 10837b20fb19SJeff Roberson MPASS(td->td_lock == &sched_lock); 1084b43179fbSJeff Roberson } 1085b43179fbSJeff Roberson 1086b43179fbSJeff Roberson void 1087b43179fbSJeff Roberson sched_wakeup(struct thread *td) 1088b43179fbSJeff Roberson { 108954b0e65fSJeff Roberson struct td_sched *ts; 109054b0e65fSJeff Roberson 10917b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 109254b0e65fSJeff Roberson ts = td->td_sched; 1093c5aa6b58SJeff Roberson td->td_flags &= ~TDF_CANSWAP; 109454b0e65fSJeff Roberson if (ts->ts_slptime > 1) { 10958460a577SJohn Birrell updatepri(td); 10968460a577SJohn Birrell resetpriority(td); 10978460a577SJohn Birrell } 10986eac7e57SAttilio Rao td->td_slptick = 0; 109954b0e65fSJeff Roberson ts->ts_slptime = 0; 110048317e9eSAlexander Motin ts->ts_slice = sched_slice; 1101f0393f06SJeff Roberson sched_add(td, SRQ_BORING); 1102b43179fbSJeff Roberson } 1103b43179fbSJeff Roberson 110437c28a02SJulian Elischer #ifdef SMP 110582a1dfc1SJulian Elischer static int 110682a1dfc1SJulian Elischer forward_wakeup(int cpunum) 110782a1dfc1SJulian Elischer { 110882a1dfc1SJulian Elischer struct pcpu *pc; 1109a38f1f26SAttilio Rao cpuset_t dontuse, map, map2; 1110a38f1f26SAttilio Rao u_int id, me; 111171a19bdcSAttilio Rao int iscpuset; 111282a1dfc1SJulian Elischer 111382a1dfc1SJulian Elischer mtx_assert(&sched_lock, MA_OWNED); 111482a1dfc1SJulian Elischer 1115ed062c8dSJulian Elischer CTR0(KTR_RUNQ, "forward_wakeup()"); 111682a1dfc1SJulian Elischer 111782a1dfc1SJulian Elischer if ((!forward_wakeup_enabled) || 111882a1dfc1SJulian Elischer (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0)) 111982a1dfc1SJulian Elischer return (0); 112082a1dfc1SJulian Elischer if (!smp_started || cold || panicstr) 112182a1dfc1SJulian Elischer return (0); 112282a1dfc1SJulian Elischer 112382a1dfc1SJulian Elischer forward_wakeups_requested++; 112482a1dfc1SJulian Elischer 112582a1dfc1SJulian Elischer /* 11268aa3d7ffSJohn Baldwin * Check the idle mask we received against what we calculated 11278aa3d7ffSJohn Baldwin * before in the old version. 112882a1dfc1SJulian Elischer */ 1129a38f1f26SAttilio Rao me = PCPU_GET(cpuid); 11308aa3d7ffSJohn Baldwin 11318aa3d7ffSJohn Baldwin /* Don't bother if we should be doing it ourself. */ 1132a38f1f26SAttilio Rao if (CPU_ISSET(me, &idle_cpus_mask) && 1133a38f1f26SAttilio Rao (cpunum == NOCPU || me == cpunum)) 113482a1dfc1SJulian Elischer return (0); 113582a1dfc1SJulian Elischer 1136a38f1f26SAttilio Rao CPU_SETOF(me, &dontuse); 113771a19bdcSAttilio Rao CPU_OR(&dontuse, &stopped_cpus); 113871a19bdcSAttilio Rao CPU_OR(&dontuse, &hlt_cpus_mask); 113971a19bdcSAttilio Rao CPU_ZERO(&map2); 114082a1dfc1SJulian Elischer if (forward_wakeup_use_loop) { 1141d098f930SNathan Whitehorn STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 1142a38f1f26SAttilio Rao id = pc->pc_cpuid; 1143a38f1f26SAttilio Rao if (!CPU_ISSET(id, &dontuse) && 114482a1dfc1SJulian Elischer pc->pc_curthread == pc->pc_idlethread) { 1145a38f1f26SAttilio Rao CPU_SET(id, &map2); 114682a1dfc1SJulian Elischer } 114782a1dfc1SJulian Elischer } 114882a1dfc1SJulian Elischer } 114982a1dfc1SJulian Elischer 115082a1dfc1SJulian Elischer if (forward_wakeup_use_mask) { 115171a19bdcSAttilio Rao map = idle_cpus_mask; 115271a19bdcSAttilio Rao CPU_NAND(&map, &dontuse); 115382a1dfc1SJulian Elischer 11548aa3d7ffSJohn Baldwin /* If they are both on, compare and use loop if different. */ 115582a1dfc1SJulian Elischer if (forward_wakeup_use_loop) { 115671a19bdcSAttilio Rao if (CPU_CMP(&map, &map2)) { 1157f0283a73SAttilio Rao printf("map != map2, loop method preferred\n"); 1158f0283a73SAttilio Rao map = map2; 115982a1dfc1SJulian Elischer } 116082a1dfc1SJulian Elischer } 116182a1dfc1SJulian Elischer } else { 1162f0283a73SAttilio Rao map = map2; 116382a1dfc1SJulian Elischer } 11648aa3d7ffSJohn Baldwin 11658aa3d7ffSJohn Baldwin /* If we only allow a specific CPU, then mask off all the others. */ 116682a1dfc1SJulian Elischer if (cpunum != NOCPU) { 116782a1dfc1SJulian Elischer KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum.")); 116871a19bdcSAttilio Rao iscpuset = CPU_ISSET(cpunum, &map); 116971a19bdcSAttilio Rao if (iscpuset == 0) 117071a19bdcSAttilio Rao CPU_ZERO(&map); 117171a19bdcSAttilio Rao else 117271a19bdcSAttilio Rao CPU_SETOF(cpunum, &map); 117382a1dfc1SJulian Elischer } 117471a19bdcSAttilio Rao if (!CPU_EMPTY(&map)) { 117582a1dfc1SJulian Elischer forward_wakeups_delivered++; 1176d098f930SNathan Whitehorn STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 1177a38f1f26SAttilio Rao id = pc->pc_cpuid; 1178a38f1f26SAttilio Rao if (!CPU_ISSET(id, &map)) 1179b722ad00SAlexander Motin continue; 1180b722ad00SAlexander Motin if (cpu_idle_wakeup(pc->pc_cpuid)) 1181a38f1f26SAttilio Rao CPU_CLR(id, &map); 1182b722ad00SAlexander Motin } 118371a19bdcSAttilio Rao if (!CPU_EMPTY(&map)) 118482a1dfc1SJulian Elischer ipi_selected(map, IPI_AST); 118582a1dfc1SJulian Elischer return (1); 118682a1dfc1SJulian Elischer } 118782a1dfc1SJulian Elischer if (cpunum == NOCPU) 118882a1dfc1SJulian Elischer printf("forward_wakeup: Idle processor not found\n"); 118982a1dfc1SJulian Elischer return (0); 119082a1dfc1SJulian Elischer } 1191f3a0f873SStephan Uphoff 1192f3a0f873SStephan Uphoff static void 1193f3a0f873SStephan Uphoff kick_other_cpu(int pri, int cpuid) 1194f3a0f873SStephan Uphoff { 11958aa3d7ffSJohn Baldwin struct pcpu *pcpu; 11968aa3d7ffSJohn Baldwin int cpri; 1197f3a0f873SStephan Uphoff 11988aa3d7ffSJohn Baldwin pcpu = pcpu_find(cpuid); 1199a38f1f26SAttilio Rao if (CPU_ISSET(cpuid, &idle_cpus_mask)) { 1200f3a0f873SStephan Uphoff forward_wakeups_delivered++; 1201b722ad00SAlexander Motin if (!cpu_idle_wakeup(cpuid)) 1202d9d8d144SJohn Baldwin ipi_cpu(cpuid, IPI_AST); 1203f3a0f873SStephan Uphoff return; 1204f3a0f873SStephan Uphoff } 1205f3a0f873SStephan Uphoff 12068aa3d7ffSJohn Baldwin cpri = pcpu->pc_curthread->td_priority; 1207f3a0f873SStephan Uphoff if (pri >= cpri) 1208f3a0f873SStephan Uphoff return; 1209f3a0f873SStephan Uphoff 1210f3a0f873SStephan Uphoff #if defined(IPI_PREEMPTION) && defined(PREEMPTION) 1211f3a0f873SStephan Uphoff #if !defined(FULL_PREEMPTION) 1212f3a0f873SStephan Uphoff if (pri <= PRI_MAX_ITHD) 1213f3a0f873SStephan Uphoff #endif /* ! FULL_PREEMPTION */ 1214f3a0f873SStephan Uphoff { 1215d9d8d144SJohn Baldwin ipi_cpu(cpuid, IPI_PREEMPT); 1216f3a0f873SStephan Uphoff return; 1217f3a0f873SStephan Uphoff } 1218f3a0f873SStephan Uphoff #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */ 1219f3a0f873SStephan Uphoff 1220f3a0f873SStephan Uphoff pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED; 1221d9d8d144SJohn Baldwin ipi_cpu(cpuid, IPI_AST); 1222f3a0f873SStephan Uphoff return; 1223f3a0f873SStephan Uphoff } 1224f3a0f873SStephan Uphoff #endif /* SMP */ 1225f3a0f873SStephan Uphoff 1226f200843bSJohn Baldwin #ifdef SMP 1227f200843bSJohn Baldwin static int 1228f200843bSJohn Baldwin sched_pickcpu(struct thread *td) 1229f200843bSJohn Baldwin { 1230f200843bSJohn Baldwin int best, cpu; 1231f200843bSJohn Baldwin 1232f200843bSJohn Baldwin mtx_assert(&sched_lock, MA_OWNED); 1233f200843bSJohn Baldwin 1234c3ea3378SJohn Baldwin if (THREAD_CAN_SCHED(td, td->td_lastcpu)) 1235c3ea3378SJohn Baldwin best = td->td_lastcpu; 1236c3ea3378SJohn Baldwin else 1237f200843bSJohn Baldwin best = NOCPU; 12383aa6d94eSJohn Baldwin CPU_FOREACH(cpu) { 1239f200843bSJohn Baldwin if (!THREAD_CAN_SCHED(td, cpu)) 1240f200843bSJohn Baldwin continue; 1241f200843bSJohn Baldwin 1242f200843bSJohn Baldwin if (best == NOCPU) 1243f200843bSJohn Baldwin best = cpu; 1244f200843bSJohn Baldwin else if (runq_length[cpu] < runq_length[best]) 1245f200843bSJohn Baldwin best = cpu; 1246f200843bSJohn Baldwin } 1247f200843bSJohn Baldwin KASSERT(best != NOCPU, ("no valid CPUs")); 1248f200843bSJohn Baldwin 1249f200843bSJohn Baldwin return (best); 1250f200843bSJohn Baldwin } 1251f200843bSJohn Baldwin #endif 1252f200843bSJohn Baldwin 1253b43179fbSJeff Roberson void 12542630e4c9SJulian Elischer sched_add(struct thread *td, int flags) 12556804a3abSJulian Elischer #ifdef SMP 1256f3a0f873SStephan Uphoff { 1257a38f1f26SAttilio Rao cpuset_t tidlemsk; 1258ad1e7d28SJulian Elischer struct td_sched *ts; 1259a38f1f26SAttilio Rao u_int cpu, cpuid; 12606804a3abSJulian Elischer int forwarded = 0; 1261f3a0f873SStephan Uphoff int single_cpu = 0; 12627cf90fb3SJeff Roberson 1263ad1e7d28SJulian Elischer ts = td->td_sched; 12647b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1265f0393f06SJeff Roberson KASSERT((td->td_inhibitors == 0), 1266f0393f06SJeff Roberson ("sched_add: trying to run inhibited thread")); 1267f0393f06SJeff Roberson KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 1268f0393f06SJeff Roberson ("sched_add: bad thread state")); 1269b61ce5b0SJeff Roberson KASSERT(td->td_flags & TDF_INMEM, 1270b61ce5b0SJeff Roberson ("sched_add: thread swapped out")); 12718f51ad55SJeff Roberson 12728f51ad55SJeff Roberson KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", 12738f51ad55SJeff Roberson "prio:%d", td->td_priority, KTR_ATTR_LINKED, 12748f51ad55SJeff Roberson sched_tdname(curthread)); 12758f51ad55SJeff Roberson KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup", 12768f51ad55SJeff Roberson KTR_ATTR_LINKED, sched_tdname(td)); 1277b3e9e682SRyan Stone SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, 1278b3e9e682SRyan Stone flags & SRQ_PREEMPTED); 12798f51ad55SJeff Roberson 12808aa3d7ffSJohn Baldwin 12817b20fb19SJeff Roberson /* 12827b20fb19SJeff Roberson * Now that the thread is moving to the run-queue, set the lock 12837b20fb19SJeff Roberson * to the scheduler's lock. 12847b20fb19SJeff Roberson */ 12857b20fb19SJeff Roberson if (td->td_lock != &sched_lock) { 12867b20fb19SJeff Roberson mtx_lock_spin(&sched_lock); 12877b20fb19SJeff Roberson thread_lock_set(td, &sched_lock); 12887b20fb19SJeff Roberson } 1289f0393f06SJeff Roberson TD_SET_RUNQ(td); 1290f3a0f873SStephan Uphoff 129160dd73b7SRyan Stone /* 129260dd73b7SRyan Stone * If SMP is started and the thread is pinned or otherwise limited to 129360dd73b7SRyan Stone * a specific set of CPUs, queue the thread to a per-CPU run queue. 129460dd73b7SRyan Stone * Otherwise, queue the thread to the global run queue. 129560dd73b7SRyan Stone * 129660dd73b7SRyan Stone * If SMP has not yet been started we must use the global run queue 129760dd73b7SRyan Stone * as per-CPU state may not be initialized yet and we may crash if we 129860dd73b7SRyan Stone * try to access the per-CPU run queues. 129960dd73b7SRyan Stone */ 130060dd73b7SRyan Stone if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND || 130160dd73b7SRyan Stone ts->ts_flags & TSF_AFFINITY)) { 130260dd73b7SRyan Stone if (td->td_pinned != 0) 1303f3a0f873SStephan Uphoff cpu = td->td_lastcpu; 130460dd73b7SRyan Stone else if (td->td_flags & TDF_BOUND) { 13058aa3d7ffSJohn Baldwin /* Find CPU from bound runq. */ 13068aa3d7ffSJohn Baldwin KASSERT(SKE_RUNQ_PCPU(ts), 13078aa3d7ffSJohn Baldwin ("sched_add: bound td_sched not on cpu runq")); 1308ad1e7d28SJulian Elischer cpu = ts->ts_runq - &runq_pcpu[0]; 130960dd73b7SRyan Stone } else 1310f200843bSJohn Baldwin /* Find a valid CPU for our cpuset */ 1311f200843bSJohn Baldwin cpu = sched_pickcpu(td); 1312f200843bSJohn Baldwin ts->ts_runq = &runq_pcpu[cpu]; 1313f200843bSJohn Baldwin single_cpu = 1; 1314f200843bSJohn Baldwin CTR3(KTR_RUNQ, 1315f200843bSJohn Baldwin "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, 1316f200843bSJohn Baldwin cpu); 1317f3a0f873SStephan Uphoff } else { 13186804a3abSJulian Elischer CTR2(KTR_RUNQ, 13198aa3d7ffSJohn Baldwin "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, 13208aa3d7ffSJohn Baldwin td); 13216804a3abSJulian Elischer cpu = NOCPU; 1322ad1e7d28SJulian Elischer ts->ts_runq = &runq; 1323e17c57b1SJeff Roberson } 1324f3a0f873SStephan Uphoff 1325a38f1f26SAttilio Rao cpuid = PCPU_GET(cpuid); 1326a38f1f26SAttilio Rao if (single_cpu && cpu != cpuid) { 1327f3a0f873SStephan Uphoff kick_other_cpu(td->td_priority, cpu); 1328f3a0f873SStephan Uphoff } else { 1329f3a0f873SStephan Uphoff if (!single_cpu) { 1330a38f1f26SAttilio Rao tidlemsk = idle_cpus_mask; 1331a38f1f26SAttilio Rao CPU_NAND(&tidlemsk, &hlt_cpus_mask); 1332a38f1f26SAttilio Rao CPU_CLR(cpuid, &tidlemsk); 1333f3a0f873SStephan Uphoff 1334a38f1f26SAttilio Rao if (!CPU_ISSET(cpuid, &idle_cpus_mask) && 1335a38f1f26SAttilio Rao ((flags & SRQ_INTR) == 0) && 133671a19bdcSAttilio Rao !CPU_EMPTY(&tidlemsk)) 1337f3a0f873SStephan Uphoff forwarded = forward_wakeup(cpu); 1338f3a0f873SStephan Uphoff } 1339f3a0f873SStephan Uphoff 1340f3a0f873SStephan Uphoff if (!forwarded) { 1341a3f2d842SStephan Uphoff if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td)) 1342f3a0f873SStephan Uphoff return; 1343f3a0f873SStephan Uphoff else 1344f3a0f873SStephan Uphoff maybe_resched(td); 1345f3a0f873SStephan Uphoff } 1346f3a0f873SStephan Uphoff } 1347f3a0f873SStephan Uphoff 13481b9d701fSAttilio Rao if ((td->td_flags & TDF_NOLOAD) == 0) 1349f3a0f873SStephan Uphoff sched_load_add(); 13509727e637SJeff Roberson runq_add(ts->ts_runq, td, flags); 1351f200843bSJohn Baldwin if (cpu != NOCPU) 1352f200843bSJohn Baldwin runq_length[cpu]++; 1353f3a0f873SStephan Uphoff } 1354f3a0f873SStephan Uphoff #else /* SMP */ 1355f3a0f873SStephan Uphoff { 1356ad1e7d28SJulian Elischer struct td_sched *ts; 1357f200843bSJohn Baldwin 1358ad1e7d28SJulian Elischer ts = td->td_sched; 13597b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 1360f0393f06SJeff Roberson KASSERT((td->td_inhibitors == 0), 1361f0393f06SJeff Roberson ("sched_add: trying to run inhibited thread")); 1362f0393f06SJeff Roberson KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), 1363f0393f06SJeff Roberson ("sched_add: bad thread state")); 1364b61ce5b0SJeff Roberson KASSERT(td->td_flags & TDF_INMEM, 1365b61ce5b0SJeff Roberson ("sched_add: thread swapped out")); 13668f51ad55SJeff Roberson KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add", 13678f51ad55SJeff Roberson "prio:%d", td->td_priority, KTR_ATTR_LINKED, 13688f51ad55SJeff Roberson sched_tdname(curthread)); 13698f51ad55SJeff Roberson KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup", 13708f51ad55SJeff Roberson KTR_ATTR_LINKED, sched_tdname(td)); 13712aaae99dSSergey Kandaurov SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL, 1372b3e9e682SRyan Stone flags & SRQ_PREEMPTED); 13738aa3d7ffSJohn Baldwin 13747b20fb19SJeff Roberson /* 13757b20fb19SJeff Roberson * Now that the thread is moving to the run-queue, set the lock 13767b20fb19SJeff Roberson * to the scheduler's lock. 13777b20fb19SJeff Roberson */ 13787b20fb19SJeff Roberson if (td->td_lock != &sched_lock) { 13797b20fb19SJeff Roberson mtx_lock_spin(&sched_lock); 13807b20fb19SJeff Roberson thread_lock_set(td, &sched_lock); 13817b20fb19SJeff Roberson } 1382f0393f06SJeff Roberson TD_SET_RUNQ(td); 1383ad1e7d28SJulian Elischer CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td); 1384ad1e7d28SJulian Elischer ts->ts_runq = &runq; 13856804a3abSJulian Elischer 13866804a3abSJulian Elischer /* 13878aa3d7ffSJohn Baldwin * If we are yielding (on the way out anyhow) or the thread 13888aa3d7ffSJohn Baldwin * being saved is US, then don't try be smart about preemption 13898aa3d7ffSJohn Baldwin * or kicking off another CPU as it won't help and may hinder. 13908aa3d7ffSJohn Baldwin * In the YIEDLING case, we are about to run whoever is being 13918aa3d7ffSJohn Baldwin * put in the queue anyhow, and in the OURSELF case, we are 13928aa3d7ffSJohn Baldwin * puting ourself on the run queue which also only happens 13938aa3d7ffSJohn Baldwin * when we are about to yield. 13946804a3abSJulian Elischer */ 13956804a3abSJulian Elischer if ((flags & SRQ_YIELDING) == 0) { 13966804a3abSJulian Elischer if (maybe_preempt(td)) 13976804a3abSJulian Elischer return; 13986804a3abSJulian Elischer } 13991b9d701fSAttilio Rao if ((td->td_flags & TDF_NOLOAD) == 0) 1400907bdbc2SJeff Roberson sched_load_add(); 14019727e637SJeff Roberson runq_add(ts->ts_runq, td, flags); 14026942d433SJohn Baldwin maybe_resched(td); 1403b43179fbSJeff Roberson } 1404f3a0f873SStephan Uphoff #endif /* SMP */ 1405f3a0f873SStephan Uphoff 1406b43179fbSJeff Roberson void 14077cf90fb3SJeff Roberson sched_rem(struct thread *td) 1408b43179fbSJeff Roberson { 1409ad1e7d28SJulian Elischer struct td_sched *ts; 14107cf90fb3SJeff Roberson 1411ad1e7d28SJulian Elischer ts = td->td_sched; 1412b61ce5b0SJeff Roberson KASSERT(td->td_flags & TDF_INMEM, 1413b61ce5b0SJeff Roberson ("sched_rem: thread swapped out")); 1414f0393f06SJeff Roberson KASSERT(TD_ON_RUNQ(td), 1415ad1e7d28SJulian Elischer ("sched_rem: thread not on run queue")); 1416b43179fbSJeff Roberson mtx_assert(&sched_lock, MA_OWNED); 14178f51ad55SJeff Roberson KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem", 14188f51ad55SJeff Roberson "prio:%d", td->td_priority, KTR_ATTR_LINKED, 14198f51ad55SJeff Roberson sched_tdname(curthread)); 1420b3e9e682SRyan Stone SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL); 1421b43179fbSJeff Roberson 14221b9d701fSAttilio Rao if ((td->td_flags & TDF_NOLOAD) == 0) 1423907bdbc2SJeff Roberson sched_load_rem(); 1424f200843bSJohn Baldwin #ifdef SMP 1425f200843bSJohn Baldwin if (ts->ts_runq != &runq) 1426f200843bSJohn Baldwin runq_length[ts->ts_runq - runq_pcpu]--; 1427f200843bSJohn Baldwin #endif 14289727e637SJeff Roberson runq_remove(ts->ts_runq, td); 1429f0393f06SJeff Roberson TD_SET_CAN_RUN(td); 1430b43179fbSJeff Roberson } 1431b43179fbSJeff Roberson 143214f0e2e9SJulian Elischer /* 14338aa3d7ffSJohn Baldwin * Select threads to run. Note that running threads still consume a 14348aa3d7ffSJohn Baldwin * slot. 143514f0e2e9SJulian Elischer */ 1436f0393f06SJeff Roberson struct thread * 1437b43179fbSJeff Roberson sched_choose(void) 1438b43179fbSJeff Roberson { 14399727e637SJeff Roberson struct thread *td; 1440e17c57b1SJeff Roberson struct runq *rq; 1441b43179fbSJeff Roberson 14427b20fb19SJeff Roberson mtx_assert(&sched_lock, MA_OWNED); 1443e17c57b1SJeff Roberson #ifdef SMP 14449727e637SJeff Roberson struct thread *tdcpu; 1445e17c57b1SJeff Roberson 1446e17c57b1SJeff Roberson rq = &runq; 14479727e637SJeff Roberson td = runq_choose_fuzz(&runq, runq_fuzz); 14489727e637SJeff Roberson tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]); 1449e17c57b1SJeff Roberson 14509727e637SJeff Roberson if (td == NULL || 14519727e637SJeff Roberson (tdcpu != NULL && 14529727e637SJeff Roberson tdcpu->td_priority < td->td_priority)) { 14539727e637SJeff Roberson CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu, 1454e17c57b1SJeff Roberson PCPU_GET(cpuid)); 14559727e637SJeff Roberson td = tdcpu; 1456e17c57b1SJeff Roberson rq = &runq_pcpu[PCPU_GET(cpuid)]; 1457e17c57b1SJeff Roberson } else { 14589727e637SJeff Roberson CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td); 1459e17c57b1SJeff Roberson } 1460e17c57b1SJeff Roberson 1461e17c57b1SJeff Roberson #else 1462e17c57b1SJeff Roberson rq = &runq; 14639727e637SJeff Roberson td = runq_choose(&runq); 1464e17c57b1SJeff Roberson #endif 1465b43179fbSJeff Roberson 14669727e637SJeff Roberson if (td) { 1467f200843bSJohn Baldwin #ifdef SMP 1468f200843bSJohn Baldwin if (td == tdcpu) 1469f200843bSJohn Baldwin runq_length[PCPU_GET(cpuid)]--; 1470f200843bSJohn Baldwin #endif 14719727e637SJeff Roberson runq_remove(rq, td); 14729727e637SJeff Roberson td->td_flags |= TDF_DIDRUN; 1473b43179fbSJeff Roberson 14749727e637SJeff Roberson KASSERT(td->td_flags & TDF_INMEM, 1475b61ce5b0SJeff Roberson ("sched_choose: thread swapped out")); 14769727e637SJeff Roberson return (td); 1477b43179fbSJeff Roberson } 1478f0393f06SJeff Roberson return (PCPU_GET(idlethread)); 1479b43179fbSJeff Roberson } 1480b43179fbSJeff Roberson 1481b43179fbSJeff Roberson void 14821e24c28fSJeff Roberson sched_preempt(struct thread *td) 14831e24c28fSJeff Roberson { 1484b3e9e682SRyan Stone 1485b3e9e682SRyan Stone SDT_PROBE2(sched, , , surrender, td, td->td_proc); 14861e24c28fSJeff Roberson thread_lock(td); 14871e24c28fSJeff Roberson if (td->td_critnest > 1) 14881e24c28fSJeff Roberson td->td_owepreempt = 1; 14891e24c28fSJeff Roberson else 14908df78c41SJeff Roberson mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL); 14911e24c28fSJeff Roberson thread_unlock(td); 14921e24c28fSJeff Roberson } 14931e24c28fSJeff Roberson 14941e24c28fSJeff Roberson void 1495b43179fbSJeff Roberson sched_userret(struct thread *td) 1496b43179fbSJeff Roberson { 1497b43179fbSJeff Roberson /* 1498b43179fbSJeff Roberson * XXX we cheat slightly on the locking here to avoid locking in 1499b43179fbSJeff Roberson * the usual case. Setting td_priority here is essentially an 1500b43179fbSJeff Roberson * incomplete workaround for not setting it properly elsewhere. 1501b43179fbSJeff Roberson * Now that some interrupt handlers are threads, not setting it 1502b43179fbSJeff Roberson * properly elsewhere can clobber it in the window between setting 1503b43179fbSJeff Roberson * it here and returning to user mode, so don't waste time setting 1504b43179fbSJeff Roberson * it perfectly here. 1505b43179fbSJeff Roberson */ 1506f5c157d9SJohn Baldwin KASSERT((td->td_flags & TDF_BORROWING) == 0, 1507f5c157d9SJohn Baldwin ("thread with borrowed priority returning to userland")); 15088460a577SJohn Birrell if (td->td_priority != td->td_user_pri) { 15097b20fb19SJeff Roberson thread_lock(td); 15108460a577SJohn Birrell td->td_priority = td->td_user_pri; 15118460a577SJohn Birrell td->td_base_pri = td->td_user_pri; 15127b20fb19SJeff Roberson thread_unlock(td); 15138460a577SJohn Birrell } 1514b43179fbSJeff Roberson } 1515de028f5aSJeff Roberson 1516e17c57b1SJeff Roberson void 1517e17c57b1SJeff Roberson sched_bind(struct thread *td, int cpu) 1518e17c57b1SJeff Roberson { 1519ad1e7d28SJulian Elischer struct td_sched *ts; 1520e17c57b1SJeff Roberson 15211d7830edSJohn Baldwin THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED); 15221d7830edSJohn Baldwin KASSERT(td == curthread, ("sched_bind: can only bind curthread")); 1523e17c57b1SJeff Roberson 1524ad1e7d28SJulian Elischer ts = td->td_sched; 1525e17c57b1SJeff Roberson 15269727e637SJeff Roberson td->td_flags |= TDF_BOUND; 1527e17c57b1SJeff Roberson #ifdef SMP 1528ad1e7d28SJulian Elischer ts->ts_runq = &runq_pcpu[cpu]; 1529e17c57b1SJeff Roberson if (PCPU_GET(cpuid) == cpu) 1530e17c57b1SJeff Roberson return; 1531e17c57b1SJeff Roberson 1532bf0acc27SJohn Baldwin mi_switch(SW_VOL, NULL); 1533e17c57b1SJeff Roberson #endif 1534e17c57b1SJeff Roberson } 1535e17c57b1SJeff Roberson 1536e17c57b1SJeff Roberson void 1537e17c57b1SJeff Roberson sched_unbind(struct thread* td) 1538e17c57b1SJeff Roberson { 15397b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 15401d7830edSJohn Baldwin KASSERT(td == curthread, ("sched_unbind: can only bind curthread")); 15419727e637SJeff Roberson td->td_flags &= ~TDF_BOUND; 1542e17c57b1SJeff Roberson } 1543e17c57b1SJeff Roberson 1544de028f5aSJeff Roberson int 1545ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td) 1546ebccf1e3SJoseph Koshy { 15477b20fb19SJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED); 15489727e637SJeff Roberson return (td->td_flags & TDF_BOUND); 1549ebccf1e3SJoseph Koshy } 1550ebccf1e3SJoseph Koshy 155136ec198bSDavid Xu void 155236ec198bSDavid Xu sched_relinquish(struct thread *td) 155336ec198bSDavid Xu { 15547b20fb19SJeff Roberson thread_lock(td); 15558df78c41SJeff Roberson mi_switch(SW_VOL | SWT_RELINQUISH, NULL); 15567b20fb19SJeff Roberson thread_unlock(td); 155736ec198bSDavid Xu } 155836ec198bSDavid Xu 1559ebccf1e3SJoseph Koshy int 1560ca59f152SJeff Roberson sched_load(void) 1561ca59f152SJeff Roberson { 1562ca59f152SJeff Roberson return (sched_tdcnt); 1563ca59f152SJeff Roberson } 1564ca59f152SJeff Roberson 1565de028f5aSJeff Roberson int 1566de028f5aSJeff Roberson sched_sizeof_proc(void) 1567de028f5aSJeff Roberson { 1568de028f5aSJeff Roberson return (sizeof(struct proc)); 1569de028f5aSJeff Roberson } 157036ec198bSDavid Xu 1571de028f5aSJeff Roberson int 1572de028f5aSJeff Roberson sched_sizeof_thread(void) 1573de028f5aSJeff Roberson { 1574ad1e7d28SJulian Elischer return (sizeof(struct thread) + sizeof(struct td_sched)); 1575de028f5aSJeff Roberson } 157679acfc49SJeff Roberson 157779acfc49SJeff Roberson fixpt_t 15787cf90fb3SJeff Roberson sched_pctcpu(struct thread *td) 157979acfc49SJeff Roberson { 1580ad1e7d28SJulian Elischer struct td_sched *ts; 158155f2099aSJeff Roberson 15823da35a0aSJohn Baldwin THREAD_LOCK_ASSERT(td, MA_OWNED); 1583ad1e7d28SJulian Elischer ts = td->td_sched; 1584ad1e7d28SJulian Elischer return (ts->ts_pctcpu); 158579acfc49SJeff Roberson } 1586b41f1452SDavid Xu 158736af9869SEdward Tomasz Napierala #ifdef RACCT 158836af9869SEdward Tomasz Napierala /* 158936af9869SEdward Tomasz Napierala * Calculates the contribution to the thread cpu usage for the latest 159036af9869SEdward Tomasz Napierala * (unfinished) second. 159136af9869SEdward Tomasz Napierala */ 159236af9869SEdward Tomasz Napierala fixpt_t 159336af9869SEdward Tomasz Napierala sched_pctcpu_delta(struct thread *td) 159436af9869SEdward Tomasz Napierala { 159536af9869SEdward Tomasz Napierala struct td_sched *ts; 159636af9869SEdward Tomasz Napierala fixpt_t delta; 159736af9869SEdward Tomasz Napierala int realstathz; 159836af9869SEdward Tomasz Napierala 159936af9869SEdward Tomasz Napierala THREAD_LOCK_ASSERT(td, MA_OWNED); 160036af9869SEdward Tomasz Napierala ts = td->td_sched; 160136af9869SEdward Tomasz Napierala delta = 0; 160236af9869SEdward Tomasz Napierala realstathz = stathz ? stathz : hz; 160336af9869SEdward Tomasz Napierala if (ts->ts_cpticks != 0) { 160436af9869SEdward Tomasz Napierala #if (FSHIFT >= CCPU_SHIFT) 160536af9869SEdward Tomasz Napierala delta = (realstathz == 100) 160636af9869SEdward Tomasz Napierala ? ((fixpt_t) ts->ts_cpticks) << 160736af9869SEdward Tomasz Napierala (FSHIFT - CCPU_SHIFT) : 160836af9869SEdward Tomasz Napierala 100 * (((fixpt_t) ts->ts_cpticks) 160936af9869SEdward Tomasz Napierala << (FSHIFT - CCPU_SHIFT)) / realstathz; 161036af9869SEdward Tomasz Napierala #else 161136af9869SEdward Tomasz Napierala delta = ((FSCALE - ccpu) * 161236af9869SEdward Tomasz Napierala (ts->ts_cpticks * 161336af9869SEdward Tomasz Napierala FSCALE / realstathz)) >> FSHIFT; 161436af9869SEdward Tomasz Napierala #endif 161536af9869SEdward Tomasz Napierala } 161636af9869SEdward Tomasz Napierala 161736af9869SEdward Tomasz Napierala return (delta); 161836af9869SEdward Tomasz Napierala } 161936af9869SEdward Tomasz Napierala #endif 162036af9869SEdward Tomasz Napierala 1621b41f1452SDavid Xu void 1622a157e425SAlexander Motin sched_tick(int cnt) 1623b41f1452SDavid Xu { 1624b41f1452SDavid Xu } 1625f0393f06SJeff Roberson 1626f0393f06SJeff Roberson /* 1627f0393f06SJeff Roberson * The actual idle process. 1628f0393f06SJeff Roberson */ 1629f0393f06SJeff Roberson void 1630f0393f06SJeff Roberson sched_idletd(void *dummy) 1631f0393f06SJeff Roberson { 1632b722ad00SAlexander Motin struct pcpuidlestat *stat; 1633f0393f06SJeff Roberson 1634ba96d2d8SJohn Baldwin THREAD_NO_SLEEPING(); 1635b722ad00SAlexander Motin stat = DPCPU_PTR(idlestat); 1636f0393f06SJeff Roberson for (;;) { 1637f0393f06SJeff Roberson mtx_assert(&Giant, MA_NOTOWNED); 1638f0393f06SJeff Roberson 1639b722ad00SAlexander Motin while (sched_runnable() == 0) { 1640b722ad00SAlexander Motin cpu_idle(stat->idlecalls + stat->oldidlecalls > 64); 1641b722ad00SAlexander Motin stat->idlecalls++; 1642b722ad00SAlexander Motin } 1643f0393f06SJeff Roberson 1644f0393f06SJeff Roberson mtx_lock_spin(&sched_lock); 16458df78c41SJeff Roberson mi_switch(SW_VOL | SWT_IDLE, NULL); 1646f0393f06SJeff Roberson mtx_unlock_spin(&sched_lock); 1647f0393f06SJeff Roberson } 1648f0393f06SJeff Roberson } 1649f0393f06SJeff Roberson 16507b20fb19SJeff Roberson /* 16517b20fb19SJeff Roberson * A CPU is entering for the first time or a thread is exiting. 16527b20fb19SJeff Roberson */ 16537b20fb19SJeff Roberson void 16547b20fb19SJeff Roberson sched_throw(struct thread *td) 16557b20fb19SJeff Roberson { 16567b20fb19SJeff Roberson /* 16577b20fb19SJeff Roberson * Correct spinlock nesting. The idle thread context that we are 16587b20fb19SJeff Roberson * borrowing was created so that it would start out with a single 16597b20fb19SJeff Roberson * spin lock (sched_lock) held in fork_trampoline(). Since we've 16607b20fb19SJeff Roberson * explicitly acquired locks in this function, the nesting count 16617b20fb19SJeff Roberson * is now 2 rather than 1. Since we are nested, calling 16627b20fb19SJeff Roberson * spinlock_exit() will simply adjust the counts without allowing 16637b20fb19SJeff Roberson * spin lock using code to interrupt us. 16647b20fb19SJeff Roberson */ 16657b20fb19SJeff Roberson if (td == NULL) { 16667b20fb19SJeff Roberson mtx_lock_spin(&sched_lock); 16677b20fb19SJeff Roberson spinlock_exit(); 16687e3a96eaSJohn Baldwin PCPU_SET(switchtime, cpu_ticks()); 16697e3a96eaSJohn Baldwin PCPU_SET(switchticks, ticks); 16707b20fb19SJeff Roberson } else { 1671eea4f254SJeff Roberson lock_profile_release_lock(&sched_lock.lock_object); 16727b20fb19SJeff Roberson MPASS(td->td_lock == &sched_lock); 16737b20fb19SJeff Roberson } 16747b20fb19SJeff Roberson mtx_assert(&sched_lock, MA_OWNED); 16757b20fb19SJeff Roberson KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count")); 16767b20fb19SJeff Roberson cpu_throw(td, choosethread()); /* doesn't return */ 16777b20fb19SJeff Roberson } 16787b20fb19SJeff Roberson 16797b20fb19SJeff Roberson void 1680fe54587fSJeff Roberson sched_fork_exit(struct thread *td) 16817b20fb19SJeff Roberson { 16827b20fb19SJeff Roberson 16837b20fb19SJeff Roberson /* 16847b20fb19SJeff Roberson * Finish setting up thread glue so that it begins execution in a 16857b20fb19SJeff Roberson * non-nested critical section with sched_lock held but not recursed. 16867b20fb19SJeff Roberson */ 1687fe54587fSJeff Roberson td->td_oncpu = PCPU_GET(cpuid); 1688fe54587fSJeff Roberson sched_lock.mtx_lock = (uintptr_t)td; 1689eea4f254SJeff Roberson lock_profile_obtain_lock_success(&sched_lock.lock_object, 1690eea4f254SJeff Roberson 0, 0, __FILE__, __LINE__); 1691fe54587fSJeff Roberson THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED); 16927b20fb19SJeff Roberson } 16937b20fb19SJeff Roberson 16948f51ad55SJeff Roberson char * 16958f51ad55SJeff Roberson sched_tdname(struct thread *td) 16968f51ad55SJeff Roberson { 16978f51ad55SJeff Roberson #ifdef KTR 16988f51ad55SJeff Roberson struct td_sched *ts; 16998f51ad55SJeff Roberson 17008f51ad55SJeff Roberson ts = td->td_sched; 17018f51ad55SJeff Roberson if (ts->ts_name[0] == '\0') 17028f51ad55SJeff Roberson snprintf(ts->ts_name, sizeof(ts->ts_name), 17038f51ad55SJeff Roberson "%s tid %d", td->td_name, td->td_tid); 17048f51ad55SJeff Roberson return (ts->ts_name); 17058f51ad55SJeff Roberson #else 17068f51ad55SJeff Roberson return (td->td_name); 17078f51ad55SJeff Roberson #endif 17088f51ad55SJeff Roberson } 17098f51ad55SJeff Roberson 171044ad5475SJohn Baldwin #ifdef KTR 171144ad5475SJohn Baldwin void 171244ad5475SJohn Baldwin sched_clear_tdname(struct thread *td) 171344ad5475SJohn Baldwin { 171444ad5475SJohn Baldwin struct td_sched *ts; 171544ad5475SJohn Baldwin 171644ad5475SJohn Baldwin ts = td->td_sched; 171744ad5475SJohn Baldwin ts->ts_name[0] = '\0'; 171844ad5475SJohn Baldwin } 171944ad5475SJohn Baldwin #endif 172044ad5475SJohn Baldwin 1721885d51a3SJeff Roberson void 1722885d51a3SJeff Roberson sched_affinity(struct thread *td) 1723885d51a3SJeff Roberson { 1724f200843bSJohn Baldwin #ifdef SMP 1725f200843bSJohn Baldwin struct td_sched *ts; 1726f200843bSJohn Baldwin int cpu; 1727f200843bSJohn Baldwin 1728f200843bSJohn Baldwin THREAD_LOCK_ASSERT(td, MA_OWNED); 1729f200843bSJohn Baldwin 1730f200843bSJohn Baldwin /* 1731f200843bSJohn Baldwin * Set the TSF_AFFINITY flag if there is at least one CPU this 1732f200843bSJohn Baldwin * thread can't run on. 1733f200843bSJohn Baldwin */ 1734f200843bSJohn Baldwin ts = td->td_sched; 1735f200843bSJohn Baldwin ts->ts_flags &= ~TSF_AFFINITY; 17363aa6d94eSJohn Baldwin CPU_FOREACH(cpu) { 1737f200843bSJohn Baldwin if (!THREAD_CAN_SCHED(td, cpu)) { 1738f200843bSJohn Baldwin ts->ts_flags |= TSF_AFFINITY; 1739f200843bSJohn Baldwin break; 1740f200843bSJohn Baldwin } 1741f200843bSJohn Baldwin } 1742f200843bSJohn Baldwin 1743f200843bSJohn Baldwin /* 1744f200843bSJohn Baldwin * If this thread can run on all CPUs, nothing else to do. 1745f200843bSJohn Baldwin */ 1746f200843bSJohn Baldwin if (!(ts->ts_flags & TSF_AFFINITY)) 1747f200843bSJohn Baldwin return; 1748f200843bSJohn Baldwin 1749f200843bSJohn Baldwin /* Pinned threads and bound threads should be left alone. */ 1750f200843bSJohn Baldwin if (td->td_pinned != 0 || td->td_flags & TDF_BOUND) 1751f200843bSJohn Baldwin return; 1752f200843bSJohn Baldwin 1753f200843bSJohn Baldwin switch (td->td_state) { 1754f200843bSJohn Baldwin case TDS_RUNQ: 1755f200843bSJohn Baldwin /* 1756f200843bSJohn Baldwin * If we are on a per-CPU runqueue that is in the set, 1757f200843bSJohn Baldwin * then nothing needs to be done. 1758f200843bSJohn Baldwin */ 1759f200843bSJohn Baldwin if (ts->ts_runq != &runq && 1760f200843bSJohn Baldwin THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu)) 1761f200843bSJohn Baldwin return; 1762f200843bSJohn Baldwin 1763f200843bSJohn Baldwin /* Put this thread on a valid per-CPU runqueue. */ 1764f200843bSJohn Baldwin sched_rem(td); 1765f200843bSJohn Baldwin sched_add(td, SRQ_BORING); 1766f200843bSJohn Baldwin break; 1767f200843bSJohn Baldwin case TDS_RUNNING: 1768f200843bSJohn Baldwin /* 1769f200843bSJohn Baldwin * See if our current CPU is in the set. If not, force a 1770f200843bSJohn Baldwin * context switch. 1771f200843bSJohn Baldwin */ 1772f200843bSJohn Baldwin if (THREAD_CAN_SCHED(td, td->td_oncpu)) 1773f200843bSJohn Baldwin return; 1774f200843bSJohn Baldwin 1775f200843bSJohn Baldwin td->td_flags |= TDF_NEEDRESCHED; 1776f200843bSJohn Baldwin if (td != curthread) 1777d9d8d144SJohn Baldwin ipi_cpu(cpu, IPI_AST); 1778f200843bSJohn Baldwin break; 1779f200843bSJohn Baldwin default: 1780f200843bSJohn Baldwin break; 1781f200843bSJohn Baldwin } 1782f200843bSJohn Baldwin #endif 1783885d51a3SJeff Roberson } 1784