/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
int				dtrace_vtime_active;
dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
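/*
 * Illustration (not part of the original file): on a uniprocessor kernel,
 * and assuming the stock values PRIO_MIN == -20, PRIO_MAX == 20 and
 * RQ_PPQ == 4, the clamp above works out to
 *
 *	ESTCPULIM(e) == min(e, 8 * (1 * 40 - 4) + 8 - 1) == min(e, 295)
 *
 * so td_estcpu can never push a timesharing priority past the bottom of
 * the timeshare range, no matter how long a thread hogs the CPU.
 */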
/*
 * The schedulable entity that runs a context.
 * This is an extension to the thread structure and is tailored to
 * the requirements of this scheduler.
 */
struct td_sched {
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	int		ts_cpticks;	/* (j) Ticks of cpu time. */
	int		ts_slptime;	/* (j) Seconds !RUNNING. */
	struct runq	*ts_runq;	/* runq the thread is currently on */
};

/* flags kept in td_flags */
#define	TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define	TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */

#define	SKE_RUNQ_PCPU(ts)						\
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)

static struct td_sched td_sched0;
struct mtx sched_lock;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static void	setup_runqs(void);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct thread *td);
static void	resetpriority(struct thread *td);
static void	resetpriority_thread(struct thread *td);
#ifdef SMP
static int	forward_wakeup(int cpunum);
#endif

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
    &sched_kp);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");
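/*
 * Worked example (illustrative, assuming hz == 1000 so tick == 1000us):
 * the default quantum is SCHED_QUANTUM == hz / 10 == 100 ticks, which the
 * sysctl above reports as 100 * 1000 == 100000 microseconds (100ms).
 * Writing 50000 to kern.sched.quantum stores 50000 / 1000 == 50 ticks;
 * values below one tick are rejected with EINVAL.
 */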
#ifdef SMP
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");

static int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
    &forward_wakeup_enabled, 0,
    "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
    &forward_wakeups_requested, 0,
    "Requests for Forwarding of wakeup to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
    &forward_wakeups_delivered, 0,
    "Completed Forwarding of wakeup to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
    &forward_wakeup_use_mask, 0,
    "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
    &forward_wakeup_use_loop, 0,
    "Use a loop to find idle cpus");

static int forward_wakeup_use_single = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
    &forward_wakeup_use_single, 0,
    "Only signal one idle cpu");

static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
    &forward_wakeup_use_htt, 0,
    "account for htt");

#endif
#if 0
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
    &sched_followon, 0,
    "allow threads to share a quantum");
#endif

static __inline void
sched_load_add(void)
{
	sched_tdcnt++;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{
	sched_tdcnt--;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}
/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}
/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the new thread should preempt the current thread.  If
 * so, it switches to it and eventually returns true.  If not, it returns
 * false so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_name);
	mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}
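/*
 * Note (added illustration): priorities compare numerically inverted, so
 * "pri >= cpri" above means the incoming thread is no more important than
 * curthread.  With FULL_PREEMPTION disabled, the #ifndef block also limits
 * involuntary preemption to interrupt-thread priorities
 * (pri <= PRI_MAX_ITHD) unless curthread is running at an idle priority
 * (cpri >= PRI_MIN_IDLE); an ordinary timesharing thread therefore never
 * preempts another one here.
 */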
/*
 * Constants for digital decay and forget:
 *	90% of (td_estcpu) usage in 5 * loadav time
 *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		td_estcpu *= decay;
 * will compute
 * 	td_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
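/*
 * Hypothetical user-space sketch (added illustration, never compiled into
 * the kernel) checking the decay claim above with FSHIFT == 11 (so
 * FSCALE == 2048) and a load average of 1.0: each step multiplies by
 * 2/3, and td_estcpu falls below 10% of its start by step 6, matching
 * the 5.68 figure in the power table.
 */
#if 0
#include <stdio.h>

#define	FSCALE			2048	/* assumes FSHIFT == 11 */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
	int loadav = 1 * FSCALE;	/* ldavg is fixed point; 1.0 */
	int loadfac = loadfactor(loadav);
	int estcpu = 255, i;

	for (i = 1; i <= 8; i++) {
		estcpu = decay_cpu(loadfac, estcpu);
		printf("step %d: estcpu %d\n", i, estcpu);
	}
	/* Prints 170, 113, 75, 50, 33, 22, ...; 22 < 25.5 at step 6. */
	return (0);
}
#endif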
/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
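/*
 * Added note: schedcpu() below multiplies ts_pctcpu by ccpu once per
 * second, so after 60 seconds an idle thread's %cpu has been scaled by
 * ccpu^60 == exp(-60/20) == exp(-3) =~ 0.05, which is the "95% in 60
 * seconds" decay promised above.
 */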
/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct td_sched *ts;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td->td_sched;
			/*
			 * Increment sleep time (if sleeping).  We
			 * ignore overflow, as above.
			 */
			/*
			 * The td_sched slptimes are not touched in wakeup
			 * because the thread may not HAVE everything in
			 * memory? XXX I think this is out of date.
			 */
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TDF_DIDRUN */
			} else if (td->td_flags & TDF_DIDRUN) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			/*
			 * If the td_sched has been idle the entire second,
			 * stop recalculating its priority until
			 * it wakes up.
			 */
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			/*
			 * If there are ANY running threads in this process,
			 * then don't count it as sleeping.
			 * XXX: this is broken.
			 */
			if (awake) {
				if (ts->ts_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(td);
				}
				ts->ts_slptime = 0;
			} else
				ts->ts_slptime++;
			if (ts->ts_slptime > 1) {
				thread_unlock(td);
				continue;
			}
			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
			resetpriority(td);
			resetpriority_thread(td);
			thread_unlock(td);
		} /* end of thread loop */
		PROC_UNLOCK(p);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
}
/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{

	for (;;) {
		schedcpu();
		pause("-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay td_estcpu to zero.
 */
static void
updatepri(struct thread *td)
{
	struct td_sched *ts;
	fixpt_t loadfac;
	unsigned int newcpu;

	ts = td->td_sched;
	loadfac = loadfactor(averunnable.ldavg[0]);
	if (ts->ts_slptime > 5 * loadfac)
		td->td_estcpu = 0;
	else {
		newcpu = td->td_estcpu;
		ts->ts_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --ts->ts_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		td->td_estcpu = newcpu;
	}
}
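/*
 * Worked example (added illustration): with a load average of 1.0, each
 * decay_cpu() step multiplies by 2/3.  A thread that slept 4 seconds with
 * td_estcpu == 90 gets two decay steps from the loop above (the first and
 * last sleep seconds are consumed by the decrements): 90 -> 60 -> 40.
 */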
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct thread *td)
{
	register unsigned int newpriority;

	if (td->td_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		sched_user_prio(td, newpriority);
	}
}

/*
 * Update the thread's priority when the associated process's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, td->td_user_pri);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	/* Account for thread0. */
	sched_load_add();
}
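/*
 * Illustration (not in the original): with the uniprocessor weights above
 * (INVERSE_ESTCPU_WEIGHT == 8, NICE_WEIGHT == 1, PRIO_MIN == -20), a
 * nice-0 timesharing thread with td_estcpu == 64 computes
 *
 *	newpriority = PUSER + 64 / 8 + 1 * (0 - (-20)) = PUSER + 28
 *
 * before being clamped into [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].
 */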
/* External interfaces start here */

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	thread0.td_lock = &sched_lock;
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (td_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time td_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;

	ts->ts_cpticks++;
	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
	if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(td);
		resetpriority_thread(td);
	}

	/*
	 * Force a context switch if the current thread has used up a full
	 * quantum (default quantum is 100ms).
	 */
	if (!TD_IS_IDLETHREAD(td) &&
	    ticks - PCPU_GET(switchticks) >= sched_quantum)
		td->td_flags |= TDF_NEEDRESCHED;
}
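/*
 * Added note: since each stat tick adds one to td_estcpu, a CPU-bound
 * thread's user priority worsens by one priority step every
 * INVERSE_ESTCPU_WEIGHT stat ticks (every 8 ticks on UP) until the
 * ESTCPULIM() clamp is hit, while the forced NEEDRESCHED above caps any
 * single run at sched_quantum ticks.
 */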
/*
 * Charge a child's scheduling CPU usage to the parent.
 */
void
sched_exit(struct proc *p, struct thread *td)
{

	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
	    td, td->td_name, td->td_priority);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{

	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    child, child->td_name, child->td_priority);
	thread_lock(td);
	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
	thread_unlock(td);
	mtx_lock_spin(&sched_lock);
	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	mtx_unlock_spin(&sched_lock);
}

void
sched_fork(struct thread *td, struct thread *childtd)
{
	sched_fork_thread(td, childtd);
}

void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
	struct td_sched *ts;

	childtd->td_estcpu = td->td_estcpu;
	childtd->td_lock = &sched_lock;
	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
	ts = childtd->td_sched;
	bzero(ts, sizeof(*ts));
}

void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		resetpriority(td);
		resetpriority_thread(td);
		thread_unlock(td);
	}
}

void
sched_class(struct thread *td, int class)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_pri_class = class;
}
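/*
 * Added note on the design: sched_exit_thread() folds the child's
 * accumulated td_estcpu back into the parent, and sched_fork_thread()
 * copies td_estcpu to the child, so a process cannot shed its CPU-usage
 * history by forking workers and letting them exit.
 */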
/*
 * Adjust the priority of a thread.
 */
static void
sched_priority(struct thread *td, u_char prio)
{
	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_name, td->td_priority, prio, curthread,
	    curthread->td_name);

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}
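/*
 * Usage sketch (added illustration): when thread A, at a more important
 * (numerically lower) priority, blocks on a turnstile whose owner B is
 * less important, the turnstile code lends B the higher priority via
 * sched_lend_prio(B, A's priority), marking B TDF_BORROWING.  Once B
 * releases the lock, sched_unlend_prio() is called with the minimum
 * priority still demanded by other waiters; if nothing else requires the
 * boost, B drops back to its base (or user) priority.
 */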
void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
		return;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_flags |= TDF_UBORROWING;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
	} else {
		sched_lend_user_prio(td, prio);
	}
}

void
sched_sleep(struct thread *td, int pri)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_slptick = ticks;
	td->td_sched->ts_slptime = 0;
	if (pri)
		sched_prio(td, pri);
	if (TD_IS_SUSPENDED(td) || pri <= PSOCK)
		td->td_flags |= TDF_CANSWAP;
}
void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct td_sched *ts;
	struct proc *p;

	ts = td->td_sched;
	p = td->td_proc;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * Switch to the sched lock to fix things up and pick
	 * a new thread.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_unlock(td);
	}

	if ((p->p_flag & P_NOLOAD) == 0)
		sched_load_rem();

	if (newtd)
		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);

	td->td_lastcpu = td->td_oncpu;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or any thing else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td->td_flags & TDF_IDLETD) {
		TD_SET_CAN_RUN(td);
#ifdef SMP
		idle_cpus_mask &= ~PCPU_GET(cpumask);
#endif
	} else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
			sched_add(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
		    ("trying to run inhibited thread"));
		newtd->td_flags |= TDF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
	}
	MPASS(newtd->td_lock == &sched_lock);

	if (td != newtd) {
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		/* I feel sleepy */
		lock_profile_release_lock(&sched_lock.lock_object);
#ifdef KDTRACE_HOOKS
		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (dtrace_vtime_active)
			(*dtrace_vtime_switch_func)(newtd);
#endif

		cpu_switch(td, newtd, td->td_lock);
		lock_profile_obtain_lock_success(&sched_lock.lock_object,
		    0, 0, __FILE__, __LINE__);
		/*
		 * Where am I?  What year is it?
		 * We are in the same thread that went to sleep above,
		 * but any amount of time may have passed.  All our context
		 * will still be available as will local variables.
		 * PCPU values however may have changed as we may have
		 * changed CPU so don't trust cached values of them.
		 * New threads will go to fork_exit() instead of here
		 * so if you change things here you may need to change
		 * things there too.
		 * If the thread above was exiting it will never wake
		 * up again here, so either it has saved everything it
		 * needed to, or the thread_wait() or wait() will
		 * need to reap it.
		 */
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

#ifdef SMP
	if (td->td_flags & TDF_IDLETD)
		idle_cpus_mask |= PCPU_GET(cpumask);
#endif
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
	MPASS(td->td_lock == &sched_lock);
}

void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	td->td_flags &= ~TDF_CANSWAP;
	if (ts->ts_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
	td->td_slptick = ticks;
	ts->ts_slptime = 0;
	sched_add(td, SRQ_BORING);
}
#ifdef SMP
/* enable HTT_2 if you have a 2-way HTT cpu.*/
static int
forward_wakeup(int cpunum)
{
	cpumask_t map, me, dontuse;
	cpumask_t map2;
	struct pcpu *pc;
	cpumask_t id, map3;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || cold || panicstr)
		return (0);

	forward_wakeups_requested++;

	/*
	 * Check the idle mask we received against what we calculated
	 * before in the old version.
	 */
	me = PCPU_GET(cpumask);
	/*
	 * Don't bother if we should be doing it ourselves.
	 */
	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
		return (0);

	dontuse = me | stopped_cpus | hlt_cpus_mask;
	map3 = 0;
	if (forward_wakeup_use_loop) {
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((id & dontuse) == 0 &&
			    pc->pc_curthread == pc->pc_idlethread) {
				map3 |= id;
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = 0;
		map = idle_cpus_mask & ~dontuse;

		/* If they are both on, compare and use loop if different. */
		if (forward_wakeup_use_loop) {
			if (map != map3) {
				printf("map (%02X) != map3 (%02X)\n",
				    map, map3);
				map = map3;
			}
		}
	} else {
		map = map3;
	}
	/* If we only allow a specific CPU, then mask off all the others. */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
		map &= (1 << cpunum);
	} else {
		/* Try to choose an idle die. */
		if (forward_wakeup_use_htt) {
			map2 = (map & (map >> 1)) & 0x5555;
			if (map2) {
				map = map2;
			}
		}

		/* Set only one bit. */
		if (forward_wakeup_use_single) {
			map = map & ((~map) + 1);
		}
	}
	if (map) {
		forward_wakeups_delivered++;
		ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}
#endif
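/*
 * Worked example of the mask tricks above (added illustration): with
 * map == 0x68 (CPUs 3, 5 and 6 idle), "map & ((~map) + 1)" keeps only
 * the lowest set bit, 0x08 (CPU 3).  For the HTT case,
 * "(map & (map >> 1)) & 0x5555" keeps the even-numbered bit of each
 * 2-way pair whose logical CPUs are both idle, e.g. map == 0x0c
 * (CPUs 2 and 3) yields 0x04, steering the IPI at one whole core.
 */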
#ifdef SMP
static void kick_other_cpu(int pri, int cpuid);

static void
kick_other_cpu(int pri, int cpuid)
{
	struct pcpu *pcpu = pcpu_find(cpuid);
	int cpri = pcpu->pc_curthread->td_priority;

	if (idle_cpus_mask & pcpu->pc_cpumask) {
		forward_wakeups_delivered++;
		ipi_selected(pcpu->pc_cpumask, IPI_AST);
		return;
	}

	if (pri >= cpri)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (pri <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
	{
		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
	ipi_selected(pcpu->pc_cpumask, IPI_AST);
	return;
}
#endif /* SMP */
1121f3a0f873SStephan Uphoff #ifdef SMP
1122a3f2d842SStephan Uphoff static void kick_other_cpu(int pri, int cpuid);
1123f3a0f873SStephan Uphoff 
1124f3a0f873SStephan Uphoff static void
1125f3a0f873SStephan Uphoff kick_other_cpu(int pri, int cpuid)
1126f3a0f873SStephan Uphoff {
1127f3a0f873SStephan Uphoff 	struct pcpu *pcpu = pcpu_find(cpuid);
1128f3a0f873SStephan Uphoff 	int cpri = pcpu->pc_curthread->td_priority;
1129f3a0f873SStephan Uphoff 
1130f3a0f873SStephan Uphoff 	if (idle_cpus_mask & pcpu->pc_cpumask) {
1131f3a0f873SStephan Uphoff 		forward_wakeups_delivered++;
1132f3a0f873SStephan Uphoff 		ipi_selected(pcpu->pc_cpumask, IPI_AST);
1133f3a0f873SStephan Uphoff 		return;
1134f3a0f873SStephan Uphoff 	}
1135f3a0f873SStephan Uphoff 
1136f3a0f873SStephan Uphoff 	if (pri >= cpri)
1137f3a0f873SStephan Uphoff 		return;
1138f3a0f873SStephan Uphoff 
1139f3a0f873SStephan Uphoff #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
1140f3a0f873SStephan Uphoff #if !defined(FULL_PREEMPTION)
1141f3a0f873SStephan Uphoff 	if (pri <= PRI_MAX_ITHD)
1142f3a0f873SStephan Uphoff #endif /* ! FULL_PREEMPTION */
1143f3a0f873SStephan Uphoff 	{
1144f3a0f873SStephan Uphoff 		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
1145f3a0f873SStephan Uphoff 		return;
1146f3a0f873SStephan Uphoff 	}
1147f3a0f873SStephan Uphoff #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
1148f3a0f873SStephan Uphoff 
1149f3a0f873SStephan Uphoff 	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
1150f3a0f873SStephan Uphoff 	ipi_selected(pcpu->pc_cpumask, IPI_AST);
1151f3a0f873SStephan Uphoff 	return;
1152f3a0f873SStephan Uphoff }
1153f3a0f873SStephan Uphoff #endif /* SMP */
1154f3a0f873SStephan Uphoff 
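/*
 * Aside: the decision ladder in kick_other_cpu() above, restated as a
 * pure function for illustration.  The enum, the function, and the
 * boolean parameters standing in for the IPI_PREEMPTION/PREEMPTION and
 * FULL_PREEMPTION compile-time knobs are all hypothetical; lower numeric
 * priority means "more important", as elsewhere in this file.
 */
enum kick_action {
	KICK_NONE,		/* Target already runs something better. */
	KICK_AST,		/* Idle target: an AST is enough. */
	KICK_PREEMPT,		/* Send an immediate preemption IPI. */
	KICK_RESCHED_AST	/* Mark TDF_NEEDRESCHED, then AST. */
};

static enum kick_action
classify_kick(int target_is_idle, int pri, int cpri, int have_preempt_ipi,
    int full_preemption, int pri_max_ithd)
{

	if (target_is_idle)
		return (KICK_AST);
	if (pri >= cpri)
		return (KICK_NONE);
	if (have_preempt_ipi && (full_preemption || pri <= pri_max_ithd))
		return (KICK_PREEMPT);
	return (KICK_RESCHED_AST);
}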
1155b43179fbSJeff Roberson void
11562630e4c9SJulian Elischer sched_add(struct thread *td, int flags)
11576804a3abSJulian Elischer #ifdef SMP
1158f3a0f873SStephan Uphoff {
1159ad1e7d28SJulian Elischer 	struct td_sched *ts;
11606804a3abSJulian Elischer 	int forwarded = 0;
11616804a3abSJulian Elischer 	int cpu;
1162f3a0f873SStephan Uphoff 	int single_cpu = 0;
11637cf90fb3SJeff Roberson 
1164ad1e7d28SJulian Elischer 	ts = td->td_sched;
11657b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1166f0393f06SJeff Roberson 	KASSERT((td->td_inhibitors == 0),
1167f0393f06SJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
1168f0393f06SJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1169f0393f06SJeff Roberson 	    ("sched_add: bad thread state"));
1170b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
1171b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
1172907bdbc2SJeff Roberson 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1173431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
1174431f8906SJulian Elischer 	    curthread->td_name);
11757b20fb19SJeff Roberson 	/*
11767b20fb19SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
11777b20fb19SJeff Roberson 	 * to the scheduler's lock.
11787b20fb19SJeff Roberson 	 */
11797b20fb19SJeff Roberson 	if (td->td_lock != &sched_lock) {
11807b20fb19SJeff Roberson 		mtx_lock_spin(&sched_lock);
11817b20fb19SJeff Roberson 		thread_lock_set(td, &sched_lock);
11827b20fb19SJeff Roberson 	}
1183f0393f06SJeff Roberson 	TD_SET_RUNQ(td);
1184f3a0f873SStephan Uphoff 
1185f3a0f873SStephan Uphoff 	if (td->td_pinned != 0) {
1186f3a0f873SStephan Uphoff 		cpu = td->td_lastcpu;
1187ad1e7d28SJulian Elischer 		ts->ts_runq = &runq_pcpu[cpu];
1188f3a0f873SStephan Uphoff 		single_cpu = 1;
1189f3a0f873SStephan Uphoff 		CTR3(KTR_RUNQ,
1190ad1e7d28SJulian Elischer 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
11919727e637SJeff Roberson 	} else if (td->td_flags & TDF_BOUND) {
1192f3a0f873SStephan Uphoff 		/* Find CPU from bound runq. */
1193ad1e7d28SJulian Elischer 		KASSERT(SKE_RUNQ_PCPU(ts), ("sched_add: bound td_sched not on cpu runq"));
1194ad1e7d28SJulian Elischer 		cpu = ts->ts_runq - &runq_pcpu[0];
1195f3a0f873SStephan Uphoff 		single_cpu = 1;
1196f3a0f873SStephan Uphoff 		CTR3(KTR_RUNQ,
1197ad1e7d28SJulian Elischer 		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
1198f3a0f873SStephan Uphoff 	} else {
11996804a3abSJulian Elischer 		CTR2(KTR_RUNQ,
1200ad1e7d28SJulian Elischer 		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, td);
12016804a3abSJulian Elischer 		cpu = NOCPU;
1202ad1e7d28SJulian Elischer 		ts->ts_runq = &runq;
1203e17c57b1SJeff Roberson 	}
1204f3a0f873SStephan Uphoff 
1205a3f2d842SStephan Uphoff 	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
1206f3a0f873SStephan Uphoff 		kick_other_cpu(td->td_priority, cpu);
1207f3a0f873SStephan Uphoff 	} else {
1208f3a0f873SStephan Uphoff 
1209f3a0f873SStephan Uphoff 		if (!single_cpu) {
1210f3a0f873SStephan Uphoff 			cpumask_t me = PCPU_GET(cpumask);
1211f3a0f873SStephan Uphoff 			int idle = idle_cpus_mask & me;
1212f3a0f873SStephan Uphoff 
1213f3a0f873SStephan Uphoff 			if (!idle && ((flags & SRQ_INTR) == 0) &&
1214f3a0f873SStephan Uphoff 			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
1215f3a0f873SStephan Uphoff 				forwarded = forward_wakeup(cpu);
1216f3a0f873SStephan Uphoff 		}
1217f3a0f873SStephan Uphoff 
1218f3a0f873SStephan Uphoff 		if (!forwarded) {
1219a3f2d842SStephan Uphoff 			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
1220f3a0f873SStephan Uphoff 				return;
1221f3a0f873SStephan Uphoff 			else
1222f3a0f873SStephan Uphoff 				maybe_resched(td);
1223f3a0f873SStephan Uphoff 		}
1224f3a0f873SStephan Uphoff 	}
1225f3a0f873SStephan Uphoff 
1226f3a0f873SStephan Uphoff 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1227f3a0f873SStephan Uphoff 		sched_load_add();
12289727e637SJeff Roberson 	runq_add(ts->ts_runq, td, flags);
1229f3a0f873SStephan Uphoff }
1230f3a0f873SStephan Uphoff #else /* SMP */
1231f3a0f873SStephan Uphoff {
1232ad1e7d28SJulian Elischer 	struct td_sched *ts;
1233ad1e7d28SJulian Elischer 	ts = td->td_sched;
12347b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1235f0393f06SJeff Roberson 	KASSERT((td->td_inhibitors == 0),
1236f0393f06SJeff Roberson 	    ("sched_add: trying to run inhibited thread"));
1237f0393f06SJeff Roberson 	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1238f0393f06SJeff Roberson 	    ("sched_add: bad thread state"));
1239b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
1240b61ce5b0SJeff Roberson 	    ("sched_add: thread swapped out"));
1241f3a0f873SStephan Uphoff 	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1242431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
1243431f8906SJulian Elischer 	    curthread->td_name);
12447b20fb19SJeff Roberson 	/*
12457b20fb19SJeff Roberson 	 * Now that the thread is moving to the run-queue, set the lock
12467b20fb19SJeff Roberson 	 * to the scheduler's lock.
12477b20fb19SJeff Roberson 	 */
12487b20fb19SJeff Roberson 	if (td->td_lock != &sched_lock) {
12497b20fb19SJeff Roberson 		mtx_lock_spin(&sched_lock);
12507b20fb19SJeff Roberson 		thread_lock_set(td, &sched_lock);
12517b20fb19SJeff Roberson 	}
1252f0393f06SJeff Roberson 	TD_SET_RUNQ(td);
1253ad1e7d28SJulian Elischer 	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1254ad1e7d28SJulian Elischer 	ts->ts_runq = &runq;
12556804a3abSJulian Elischer 
12566804a3abSJulian Elischer 	/*
12576804a3abSJulian Elischer 	 * If we are yielding (on the way out anyhow)
12586804a3abSJulian Elischer 	 * or the thread being saved is us,
12596804a3abSJulian Elischer 	 * then don't try to be smart about preemption
12606804a3abSJulian Elischer 	 * or kicking off another CPU,
12616804a3abSJulian Elischer 	 * as it won't help and may hinder.
12626804a3abSJulian Elischer 	 * In the YIELDING case, we are about to run whoever is
12636804a3abSJulian Elischer 	 * being put in the queue anyhow, and in the
12646804a3abSJulian Elischer 	 * OURSELF case, we are putting ourselves on the run queue,
12656804a3abSJulian Elischer 	 * which also only happens when we are about to yield.
12666804a3abSJulian Elischer 	 */
12676804a3abSJulian Elischer 	if ((flags & SRQ_YIELDING) == 0) {
12686804a3abSJulian Elischer 		if (maybe_preempt(td))
12696804a3abSJulian Elischer 			return;
12706804a3abSJulian Elischer 	}
1271f2f51f8aSJeff Roberson 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1272907bdbc2SJeff Roberson 		sched_load_add();
12739727e637SJeff Roberson 	runq_add(ts->ts_runq, td, flags);
12746942d433SJohn Baldwin 	maybe_resched(td);
1275b43179fbSJeff Roberson }
1276f3a0f873SStephan Uphoff #endif /* SMP */
1277f3a0f873SStephan Uphoff 
1278b43179fbSJeff Roberson void
12797cf90fb3SJeff Roberson sched_rem(struct thread *td)
1280b43179fbSJeff Roberson {
1281ad1e7d28SJulian Elischer 	struct td_sched *ts;
12827cf90fb3SJeff Roberson 
1283ad1e7d28SJulian Elischer 	ts = td->td_sched;
1284b61ce5b0SJeff Roberson 	KASSERT(td->td_flags & TDF_INMEM,
1285b61ce5b0SJeff Roberson 	    ("sched_rem: thread swapped out"));
1286f0393f06SJeff Roberson 	KASSERT(TD_ON_RUNQ(td),
1287ad1e7d28SJulian Elischer 	    ("sched_rem: thread not on run queue"));
1288b43179fbSJeff Roberson 	mtx_assert(&sched_lock, MA_OWNED);
1289907bdbc2SJeff Roberson 	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1290431f8906SJulian Elischer 	    td, td->td_name, td->td_priority, curthread,
1291431f8906SJulian Elischer 	    curthread->td_name);
1292b43179fbSJeff Roberson 
1293f2f51f8aSJeff Roberson 	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1294907bdbc2SJeff Roberson 		sched_load_rem();
12959727e637SJeff Roberson 	runq_remove(ts->ts_runq, td);
1296f0393f06SJeff Roberson 	TD_SET_CAN_RUN(td);
1297b43179fbSJeff Roberson }
1298b43179fbSJeff Roberson 
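/*
 * Aside: sched_load_add()/sched_load_rem(), used by sched_add() and
 * sched_rem() above, keep sched_tdcnt in step with the number of runnable
 * threads that count toward the load average (P_NOLOAD threads, such as
 * the idle kthreads, are excluded).  Below is a minimal sketch of the
 * classic exponentially-decayed average that a count like sched_load()
 * conventionally feeds; the 5-second sample period and the 1-minute
 * horizon are assumptions for illustration, not values from this file.
 */
#include <math.h>

static double
loadavg_step(double avg, int nrunnable)
{
	/* Decay factor exp(-5/60) for a 1-minute average, 5 s samples. */
	const double decay = exp(-5.0 / 60.0);

	return (avg * decay + (double)nrunnable * (1.0 - decay));
}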
129914f0e2e9SJulian Elischer /*
130014f0e2e9SJulian Elischer  * Select threads to run.
130114f0e2e9SJulian Elischer  * Notice that the running threads still consume a slot.
130214f0e2e9SJulian Elischer  */
1303f0393f06SJeff Roberson struct thread *
1304b43179fbSJeff Roberson sched_choose(void)
1305b43179fbSJeff Roberson {
13069727e637SJeff Roberson 	struct thread *td;
1307e17c57b1SJeff Roberson 	struct runq *rq;
1308b43179fbSJeff Roberson 
13097b20fb19SJeff Roberson 	mtx_assert(&sched_lock, MA_OWNED);
1310e17c57b1SJeff Roberson #ifdef SMP
13119727e637SJeff Roberson 	struct thread *tdcpu;
1312e17c57b1SJeff Roberson 
1313e17c57b1SJeff Roberson 	rq = &runq;
13149727e637SJeff Roberson 	td = runq_choose_fuzz(&runq, runq_fuzz);
13159727e637SJeff Roberson 	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1316e17c57b1SJeff Roberson 
13179727e637SJeff Roberson 	if (td == NULL ||
13189727e637SJeff Roberson 	    (tdcpu != NULL &&
13199727e637SJeff Roberson 	    tdcpu->td_priority < td->td_priority)) {
13209727e637SJeff Roberson 		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
1321e17c57b1SJeff Roberson 		    PCPU_GET(cpuid));
13229727e637SJeff Roberson 		td = tdcpu;
1323e17c57b1SJeff Roberson 		rq = &runq_pcpu[PCPU_GET(cpuid)];
1324e17c57b1SJeff Roberson 	} else {
13259727e637SJeff Roberson 		CTR1(KTR_RUNQ, "choosing td %p from main runq", td);
1326e17c57b1SJeff Roberson 	}
1327e17c57b1SJeff Roberson 
1328e17c57b1SJeff Roberson #else
1329e17c57b1SJeff Roberson 	rq = &runq;
13309727e637SJeff Roberson 	td = runq_choose(&runq);
1331e17c57b1SJeff Roberson #endif
1332b43179fbSJeff Roberson 
13339727e637SJeff Roberson 	if (td) {
13349727e637SJeff Roberson 		runq_remove(rq, td);
13359727e637SJeff Roberson 		td->td_flags |= TDF_DIDRUN;
1336b43179fbSJeff Roberson 
13379727e637SJeff Roberson 		KASSERT(td->td_flags & TDF_INMEM,
1338b61ce5b0SJeff Roberson 		    ("sched_choose: thread swapped out"));
13399727e637SJeff Roberson 		return (td);
1340b43179fbSJeff Roberson 	}
1341f0393f06SJeff Roberson 	return (PCPU_GET(idlethread));
1342b43179fbSJeff Roberson }
1343b43179fbSJeff Roberson 
1344b43179fbSJeff Roberson void
13451e24c28fSJeff Roberson sched_preempt(struct thread *td)
13461e24c28fSJeff Roberson {
13471e24c28fSJeff Roberson 	thread_lock(td);
13481e24c28fSJeff Roberson 	if (td->td_critnest > 1)
13491e24c28fSJeff Roberson 		td->td_owepreempt = 1;
13501e24c28fSJeff Roberson 	else
13518df78c41SJeff Roberson 		mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
13521e24c28fSJeff Roberson 	thread_unlock(td);
13531e24c28fSJeff Roberson }
13541e24c28fSJeff Roberson 
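/*
 * Aside: sched_preempt() above defers the switch when the target is
 * inside a critical section (td_critnest > 1 here, because holding the
 * thread's spin lock itself accounts for one level) and records the debt
 * in td_owepreempt; the deferred switch is then paid when the critical
 * section is exited.  A minimal single-threaded sketch of that
 * defer-until-exit pattern; every name in it is hypothetical.
 */
static int critnest;		/* Critical-section nesting depth. */
static int owepreempt;		/* Preemption arrived while nested. */

static void
do_switch(void)
{
	/* Stand-in for the real context switch (mi_switch()). */
}

static void
preempt_request(void)
{
	if (critnest > 0)
		owepreempt = 1;		/* Cannot switch now; defer. */
	else
		do_switch();
}

static void
critical_exit_sketch(void)
{
	if (--critnest == 0 && owepreempt) {
		owepreempt = 0;
		do_switch();		/* Pay the deferred preemption. */
	}
}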
13551e24c28fSJeff Roberson void
1356b43179fbSJeff Roberson sched_userret(struct thread *td)
1357b43179fbSJeff Roberson {
1358b43179fbSJeff Roberson 	/*
1359b43179fbSJeff Roberson 	 * XXX we cheat slightly on the locking here to avoid locking in
1360b43179fbSJeff Roberson 	 * the usual case.  Setting td_priority here is essentially an
1361b43179fbSJeff Roberson 	 * incomplete workaround for not setting it properly elsewhere.
1362b43179fbSJeff Roberson 	 * Now that some interrupt handlers are threads, not setting it
1363b43179fbSJeff Roberson 	 * properly elsewhere can clobber it in the window between setting
1364b43179fbSJeff Roberson 	 * it here and returning to user mode, so don't waste time setting
1365b43179fbSJeff Roberson 	 * it perfectly here.
1366b43179fbSJeff Roberson 	 */
1367f5c157d9SJohn Baldwin 	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1368f5c157d9SJohn Baldwin 	    ("thread with borrowed priority returning to userland"));
13698460a577SJohn Birrell 	if (td->td_priority != td->td_user_pri) {
13707b20fb19SJeff Roberson 		thread_lock(td);
13718460a577SJohn Birrell 		td->td_priority = td->td_user_pri;
13728460a577SJohn Birrell 		td->td_base_pri = td->td_user_pri;
13737b20fb19SJeff Roberson 		thread_unlock(td);
13748460a577SJohn Birrell 	}
1375b43179fbSJeff Roberson }
1376de028f5aSJeff Roberson 
1377e17c57b1SJeff Roberson void
1378e17c57b1SJeff Roberson sched_bind(struct thread *td, int cpu)
1379e17c57b1SJeff Roberson {
1380ad1e7d28SJulian Elischer 	struct td_sched *ts;
1381e17c57b1SJeff Roberson 
13827b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
1383e17c57b1SJeff Roberson 	KASSERT(TD_IS_RUNNING(td),
1384e17c57b1SJeff Roberson 	    ("sched_bind: cannot bind non-running thread"));
1385e17c57b1SJeff Roberson 
1386ad1e7d28SJulian Elischer 	ts = td->td_sched;
1387e17c57b1SJeff Roberson 
13889727e637SJeff Roberson 	td->td_flags |= TDF_BOUND;
1389e17c57b1SJeff Roberson #ifdef SMP
1390ad1e7d28SJulian Elischer 	ts->ts_runq = &runq_pcpu[cpu];
1391e17c57b1SJeff Roberson 	if (PCPU_GET(cpuid) == cpu)
1392e17c57b1SJeff Roberson 		return;
1393e17c57b1SJeff Roberson 
1394bf0acc27SJohn Baldwin 	mi_switch(SW_VOL, NULL);
1395e17c57b1SJeff Roberson #endif
1396e17c57b1SJeff Roberson }
1397e17c57b1SJeff Roberson 
1398e17c57b1SJeff Roberson void
1399e17c57b1SJeff Roberson sched_unbind(struct thread *td)
1400e17c57b1SJeff Roberson {
14017b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
14029727e637SJeff Roberson 	td->td_flags &= ~TDF_BOUND;
1403e17c57b1SJeff Roberson }
1404e17c57b1SJeff Roberson 
1405de028f5aSJeff Roberson int
1406ebccf1e3SJoseph Koshy sched_is_bound(struct thread *td)
1407ebccf1e3SJoseph Koshy {
14087b20fb19SJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED);
14099727e637SJeff Roberson 	return (td->td_flags & TDF_BOUND);
1410ebccf1e3SJoseph Koshy }
1411ebccf1e3SJoseph Koshy 
141236ec198bSDavid Xu void
141336ec198bSDavid Xu sched_relinquish(struct thread *td)
141436ec198bSDavid Xu {
14157b20fb19SJeff Roberson 	thread_lock(td);
14168df78c41SJeff Roberson 	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
14177b20fb19SJeff Roberson 	thread_unlock(td);
141836ec198bSDavid Xu }
141936ec198bSDavid Xu 
1420ebccf1e3SJoseph Koshy int
1421ca59f152SJeff Roberson sched_load(void)
1422ca59f152SJeff Roberson {
1423ca59f152SJeff Roberson 	return (sched_tdcnt);
1424ca59f152SJeff Roberson }
1425ca59f152SJeff Roberson 
1426de028f5aSJeff Roberson int
1427de028f5aSJeff Roberson sched_sizeof_proc(void)
1428de028f5aSJeff Roberson {
1429de028f5aSJeff Roberson 	return (sizeof(struct proc));
1430de028f5aSJeff Roberson }
143136ec198bSDavid Xu 
1432de028f5aSJeff Roberson int
1433de028f5aSJeff Roberson sched_sizeof_thread(void)
1434de028f5aSJeff Roberson {
1435ad1e7d28SJulian Elischer 	return (sizeof(struct thread) + sizeof(struct td_sched));
1436de028f5aSJeff Roberson }
143779acfc49SJeff Roberson 
143879acfc49SJeff Roberson fixpt_t
14397cf90fb3SJeff Roberson sched_pctcpu(struct thread *td)
144079acfc49SJeff Roberson {
1441ad1e7d28SJulian Elischer 	struct td_sched *ts;
144255f2099aSJeff Roberson 
1443ad1e7d28SJulian Elischer 	ts = td->td_sched;
1444ad1e7d28SJulian Elischer 	return (ts->ts_pctcpu);
144579acfc49SJeff Roberson }
1446b41f1452SDavid Xu 
1447b41f1452SDavid Xu void
1448b41f1452SDavid Xu sched_tick(void)
1449b41f1452SDavid Xu {
1450b41f1452SDavid Xu }
1451f0393f06SJeff Roberson 
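/*
 * Aside: sched_pctcpu() above returns a fixed-point fraction of type
 * fixpt_t.  A minimal sketch of converting such a value to a rounded
 * percentage, assuming the usual <sys/param.h> FSCALE/FSHIFT fixed-point
 * convention (FSCALE represents 1.0); the helper name is made up for
 * illustration.
 */
static int
pctcpu_to_percent(fixpt_t pct)
{

	/* Multiply before shifting to keep the fractional bits. */
	return ((int)((pct * 100 + FSCALE / 2) >> FSHIFT));
}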
1452f0393f06SJeff Roberson /*
1453f0393f06SJeff Roberson  * The actual idle process.
1454f0393f06SJeff Roberson  */
1455f0393f06SJeff Roberson void
1456f0393f06SJeff Roberson sched_idletd(void *dummy)
1457f0393f06SJeff Roberson {
1458f0393f06SJeff Roberson 
1459f0393f06SJeff Roberson 	for (;;) {
1460f0393f06SJeff Roberson 		mtx_assert(&Giant, MA_NOTOWNED);
1461f0393f06SJeff Roberson 
1462f0393f06SJeff Roberson 		while (sched_runnable() == 0)
14636c47aaaeSJeff Roberson 			cpu_idle(0);
1464f0393f06SJeff Roberson 
1465f0393f06SJeff Roberson 		mtx_lock_spin(&sched_lock);
14668df78c41SJeff Roberson 		mi_switch(SW_VOL | SWT_IDLE, NULL);
1467f0393f06SJeff Roberson 		mtx_unlock_spin(&sched_lock);
1468f0393f06SJeff Roberson 	}
1469f0393f06SJeff Roberson }
1470f0393f06SJeff Roberson 
14717b20fb19SJeff Roberson /*
14727b20fb19SJeff Roberson  * A CPU is entering for the first time or a thread is exiting.
14737b20fb19SJeff Roberson  */
14747b20fb19SJeff Roberson void
14757b20fb19SJeff Roberson sched_throw(struct thread *td)
14767b20fb19SJeff Roberson {
14777b20fb19SJeff Roberson 	/*
14787b20fb19SJeff Roberson 	 * Correct spinlock nesting.  The idle thread context that we are
14797b20fb19SJeff Roberson 	 * borrowing was created so that it would start out with a single
14807b20fb19SJeff Roberson 	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
14817b20fb19SJeff Roberson 	 * explicitly acquired locks in this function, the nesting count
14827b20fb19SJeff Roberson 	 * is now 2 rather than 1.  Since we are nested, calling
14837b20fb19SJeff Roberson 	 * spinlock_exit() will simply adjust the counts without allowing
14847b20fb19SJeff Roberson 	 * spin-lock-using code to interrupt us.
14857b20fb19SJeff Roberson 	 */
14867b20fb19SJeff Roberson 	if (td == NULL) {
14877b20fb19SJeff Roberson 		mtx_lock_spin(&sched_lock);
14887b20fb19SJeff Roberson 		spinlock_exit();
14897b20fb19SJeff Roberson 	} else {
1490eea4f254SJeff Roberson 		lock_profile_release_lock(&sched_lock.lock_object);
14917b20fb19SJeff Roberson 		MPASS(td->td_lock == &sched_lock);
14927b20fb19SJeff Roberson 	}
14937b20fb19SJeff Roberson 	mtx_assert(&sched_lock, MA_OWNED);
14947b20fb19SJeff Roberson 	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
14957b20fb19SJeff Roberson 	PCPU_SET(switchtime, cpu_ticks());
14967b20fb19SJeff Roberson 	PCPU_SET(switchticks, ticks);
14977b20fb19SJeff Roberson 	cpu_throw(td, choosethread());	/* doesn't return */
14987b20fb19SJeff Roberson }
14997b20fb19SJeff Roberson 
15007b20fb19SJeff Roberson void
1501fe54587fSJeff Roberson sched_fork_exit(struct thread *td)
15027b20fb19SJeff Roberson {
15037b20fb19SJeff Roberson 
15047b20fb19SJeff Roberson 	/*
15057b20fb19SJeff Roberson 	 * Finish setting up thread glue so that it begins execution in a
15067b20fb19SJeff Roberson 	 * non-nested critical section with sched_lock held but not recursed.
15077b20fb19SJeff Roberson 	 */
1508fe54587fSJeff Roberson 	td->td_oncpu = PCPU_GET(cpuid);
1509fe54587fSJeff Roberson 	sched_lock.mtx_lock = (uintptr_t)td;
1510eea4f254SJeff Roberson 	lock_profile_obtain_lock_success(&sched_lock.lock_object,
1511eea4f254SJeff Roberson 	    0, 0, __FILE__, __LINE__);
1512fe54587fSJeff Roberson 	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
15137b20fb19SJeff Roberson }
15147b20fb19SJeff Roberson 
1515885d51a3SJeff Roberson void
1516885d51a3SJeff Roberson sched_affinity(struct thread *td)
1517885d51a3SJeff Roberson {
1518885d51a3SJeff Roberson }
1519