/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"

#define	kse td_sched

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT	1		/* Priorities per nice level. */
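
/*
 * Illustrative arithmetic for ESTCPULIM() (an aside, not part of the
 * algorithm): on a UP kernel, with NICE_WEIGHT == 1, a 40-step nice range
 * (PRIO_MAX - PRIO_MIN) and RQ_PPQ assumed to be 4 as in <sys/runq.h>,
 * the clamp evaluates to
 *	8 * (1 * 40 - 4) + 8 - 1 = 295
 * so kg_estcpu alone can never push a timeshare priority past the bottom
 * of its range.
 */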

/*
 * The schedulable entity that can be given a context to run.
 * A process may have several of these.  Probably one per processor
 * but possibly a few more.  In this universe they are grouped
 * with a KSEG that contains the priority and niceness
 * for the group.
 */
struct kse {
	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
	struct thread	*ke_thread;	/* (*) Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_THREAD = 0x0,	/* slaved to thread state */
		KES_ONRUNQ
	} ke_state;			/* (j) KSE status. */
	int		ke_cpticks;	/* (j) Ticks of cpu time. */
	struct runq	*ke_runq;	/* runq the kse is currently on */
};

#define	ke_proc		ke_thread->td_proc
#define	ke_ksegrp	ke_thread->td_ksegrp

#define	td_kse td_sched

/* flags kept in td_flags */
#define	TDF_DIDRUN	TDF_SCHED0	/* KSE actually ran. */
#define	TDF_EXIT	TDF_SCHED1	/* KSE is being killed. */
#define	TDF_BOUND	TDF_SCHED2

#define	ke_flags	ke_thread->td_flags
#define	KEF_DIDRUN	TDF_DIDRUN	/* KSE actually ran. */
#define	KEF_EXIT	TDF_EXIT	/* KSE is being killed. */
#define	KEF_BOUND	TDF_BOUND	/* stuck to one CPU */

#define	SKE_RUNQ_PCPU(ke)						\
    ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)

struct kg_sched {
	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
					    /* the system scheduler. */
	int	skg_avail_opennings;	/* (j) Num KSEs requested in group. */
	int	skg_concurrency;	/* (j) Num KSEs requested in group. */
};
#define	kg_last_assigned	kg_sched->skg_last_assigned
#define	kg_avail_opennings	kg_sched->skg_avail_opennings
#define	kg_concurrency		kg_sched->skg_concurrency

#define	SLOT_RELEASE(kg)						\
do {									\
	kg->kg_avail_opennings++;					\
	CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)",		\
	    kg,								\
	    kg->kg_concurrency,						\
	    kg->kg_avail_opennings);					\
/*	KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),	\
	    ("slots out of whack"));*/					\
} while (0)

#define	SLOT_USE(kg)							\
do {									\
	kg->kg_avail_opennings--;					\
	CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)",			\
	    kg,								\
	    kg->kg_concurrency,						\
	    kg->kg_avail_opennings);					\
/*	KASSERT((kg->kg_avail_opennings >= 0),				\
	    ("slots out of whack"));*/					\
} while (0)

/*
 * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
 * cpus.
 */
#define	KSE_CAN_MIGRATE(ke)						\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)

static struct kse kse0;
static struct kg_sched kg_sched0;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout roundrobin_callout;

static void	slot_fill(struct ksegrp *kg);
static struct kse *sched_choose(void);	/* XXX Should be thread * */

static void	setup_runqs(void);
static void	roundrobin(void *arg);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct ksegrp *kg);
static void	resetpriority(struct ksegrp *kg);
static void	resetpriority_thread(struct thread *td, struct ksegrp *kg);
#ifdef SMP
static int	forward_wakeup(int cpunum);
#endif

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");
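
/*
 * Worked example of the conversion above (figures assume hz == 100, so
 * tick == 10000 microseconds): the default quantum of SCHED_QUANTUM ==
 * hz / 10 == 10 ticks reads back as 10 * 10000 == 100000 us (100 ms);
 * writing kern.sched.quantum=50000 stores 50000 / 10000 == 5 ticks and
 * sets hogticks to 10.
 */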

#ifdef SMP
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
    &forward_wakeup_enabled, 0,
    "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
    &forward_wakeups_requested, 0,
    "Requests for Forwarding of wakeup to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
    &forward_wakeups_delivered, 0,
    "Completed Forwarding of wakeup to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
    &forward_wakeup_use_mask, 0,
    "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
    &forward_wakeup_use_loop, 0,
    "Use a loop to find idle cpus");

static int forward_wakeup_use_single = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
    &forward_wakeup_use_single, 0,
    "Only signal one idle cpu");

static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
    &forward_wakeup_use_htt, 0,
    "account for htt");

#endif
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
    &sched_followon, 0,
    "allow threads to share a quantum");

static int sched_pfollowons = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD,
    &sched_pfollowons, 0,
    "number of followons done to a different ksegrp");

static int sched_kgfollowons = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
    &sched_kgfollowons, 0,
    "number of followons done in a ksegrp");

static __inline void
sched_load_add(void)
{
	sched_tdcnt++;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{
	sched_tdcnt--;
	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
}
/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (kg_estcpu) usage in 5 * loadav time
 *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		kg_estcpu *= decay;
 * will compute
 *	kg_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	    solving for factor,
 *	    ln(factor) =~ (-2.30/5*loadav), or
 *	    factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *		exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	    solving for power,
 *	    power*ln(b/(b+1)) =~ -2.30, or
 *	    power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
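
/*
 * Illustrative numbers for the decay (an aside; the values follow from
 * the derivation above): with a load average of 2, loadfactor() yields
 * 4 (times FSCALE), so decay_cpu() multiplies by 4 / (4 + 1) = 0.8 each
 * second.  After 5 * loadavg = 10 seconds, 0.8**10 =~ 0.107, i.e. about
 * 90% of kg_estcpu has been forgotten, matching the power of 10.32
 * tabulated above for loadav 2.
 */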
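
/*
 * Sanity check on ccpu (an aside): ccpu == exp(-1/20), so a kse that is
 * idle for 60 seconds has ke_pctcpu scaled by exp(-60/20) == exp(-3) =~
 * 0.0498, i.e. 95% of its %cpu has decayed away, as promised above.
 */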

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Prevent state changes and protect run queue.
		 */
		mtx_lock_spin(&sched_lock);
		/*
		 * Increment time in/out of memory.  We ignore overflow; with
		 * 16-bit int's (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_THREAD_IN_GROUP(kg, td) {
				ke = td->td_kse;
				/*
				 * Increment sleep time (if sleeping).  We
				 * ignore overflow, as above.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				} else if ((ke->ke_state == KES_THREAD) &&
				    (TD_IS_RUNNING(td))) {
					awake = 1;
					/* Do not clear KEF_DIDRUN */
				} else if (ke->ke_flags & KEF_DIDRUN) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				}

				/*
				 * ke_pctcpu is only for ps and ttyinfo().
				 * Do it per kse, and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
				    FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_cpticks == 0)
					continue;
#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ke->ke_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake) {
				if (kg->kg_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(kg);
				}
				kg->kg_slptime = 0;
			} else
				kg->kg_slptime++;
			if (kg->kg_slptime > 1)
				continue;
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				resetpriority_thread(td, kg);
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{
	int nowake;

	for (;;) {
		schedcpu();
		tsleep(&nowake, 0, "-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay kg_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
	register fixpt_t loadfac;
	register unsigned int newcpu;

	loadfac = loadfactor(averunnable.ldavg[0]);
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		newcpu = kg->kg_estcpu;
		kg->kg_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
}
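
/*
 * Example of the loop above (illustrative figures): a ksegrp with
 * kg_estcpu == 100 that slept 4 seconds under a load average of 1
 * (loadfac == 2) is decayed twice by 2/3, giving 100 -> 66 -> 44, while
 * any sleep longer than 5 * loadfac == 10 seconds zeroes kg_estcpu
 * outright.
 */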

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;

	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		sched_user_prio(kg, newpriority);
	}
}
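
/*
 * Illustrative evaluation of the formula above (PUSER == 160 is an
 * assumption, matching PRI_MIN_TIMESHARE on this branch): a nice 0
 * ksegrp with kg_estcpu == 80 gets
 *	160 + 80 / 8 + 1 * (0 - (-20)) = 190
 * and the same ksegrp at nice -20 gets 170; the min/max pair then clips
 * the result into the timeshare range.
 */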

/*
 * Update the thread's priority when the associated ksegroup's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td, struct ksegrp *kg)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, kg->kg_user_pri);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&roundrobin_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);

	/* Account for thread0. */
	sched_load_add();
}

/* External interfaces start here */
/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	ksegrp0.kg_sched = &kg_sched0;
	thread0.td_sched = &kse0;
	kse0.ke_thread = &thread0;
	kse0.ke_state = KES_THREAD;
	kg_sched0.skg_concurrency = 1;
	kg_sched0.skg_avail_opennings = 0; /* we are already running */
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (kg_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time kg_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	ke = td->td_kse;

	ke->ke_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		resetpriority_thread(td, kg);
	}
}
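
/*
 * A rough feel for the ramp described above (illustrative; assumes a UP
 * kernel with stathz == 128): a continuously running thread gains 128
 * kg_estcpu per second, so its user priority worsens by one step per
 * INVERSE_ESTCPU_WEIGHT == 8 ticks, i.e. 16 steps per second, until
 * ESTCPULIM() caps kg_estcpu.
 */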
Assume that the child process 710b43179fbSJeff Roberson * aggregated all the estcpu into the 'built-in' ksegrp. 711b43179fbSJeff Roberson */ 712b43179fbSJeff Roberson void 71355d44f79SJulian Elischer sched_exit(struct proc *p, struct thread *td) 714f7f9e7f3SJeff Roberson { 71555d44f79SJulian Elischer sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td); 71655d44f79SJulian Elischer sched_exit_thread(FIRST_THREAD_IN_PROC(p), td); 717f7f9e7f3SJeff Roberson } 718f7f9e7f3SJeff Roberson 719f7f9e7f3SJeff Roberson void 72055d44f79SJulian Elischer sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd) 721b43179fbSJeff Roberson { 7222056d0a1SJohn Baldwin 7232056d0a1SJohn Baldwin mtx_assert(&sched_lock, MA_OWNED); 72455d44f79SJulian Elischer kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu); 725b43179fbSJeff Roberson } 726b43179fbSJeff Roberson 727b43179fbSJeff Roberson void 728f7f9e7f3SJeff Roberson sched_exit_thread(struct thread *td, struct thread *child) 729b43179fbSJeff Roberson { 730907bdbc2SJeff Roberson CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 731907bdbc2SJeff Roberson child, child->td_proc->p_comm, child->td_priority); 7327d5ea13fSDoug Rabson if ((child->td_proc->p_flag & P_NOLOAD) == 0) 733907bdbc2SJeff Roberson sched_load_rem(); 734f7f9e7f3SJeff Roberson } 735bcb06d59SJeff Roberson 736f7f9e7f3SJeff Roberson void 737ed062c8dSJulian Elischer sched_fork(struct thread *td, struct thread *childtd) 738f7f9e7f3SJeff Roberson { 739ed062c8dSJulian Elischer sched_fork_ksegrp(td, childtd->td_ksegrp); 740ed062c8dSJulian Elischer sched_fork_thread(td, childtd); 741f7f9e7f3SJeff Roberson } 742f7f9e7f3SJeff Roberson 743f7f9e7f3SJeff Roberson void 74455d44f79SJulian Elischer sched_fork_ksegrp(struct thread *td, struct ksegrp *child) 745f7f9e7f3SJeff Roberson { 7462056d0a1SJohn Baldwin mtx_assert(&sched_lock, MA_OWNED); 74755d44f79SJulian Elischer child->kg_estcpu = td->td_ksegrp->kg_estcpu; 748f7f9e7f3SJeff Roberson } 749bcb06d59SJeff Roberson 750f7f9e7f3SJeff Roberson void 751ed062c8dSJulian Elischer sched_fork_thread(struct thread *td, struct thread *childtd) 752f7f9e7f3SJeff Roberson { 753ed062c8dSJulian Elischer sched_newthread(childtd); 754b43179fbSJeff Roberson } 755b43179fbSJeff Roberson 756b43179fbSJeff Roberson void 757fa885116SJulian Elischer sched_nice(struct proc *p, int nice) 758b43179fbSJeff Roberson { 759fa885116SJulian Elischer struct ksegrp *kg; 760f5c157d9SJohn Baldwin struct thread *td; 7610b5318c8SJohn Baldwin 762fa885116SJulian Elischer PROC_LOCK_ASSERT(p, MA_OWNED); 7630b5318c8SJohn Baldwin mtx_assert(&sched_lock, MA_OWNED); 764fa885116SJulian Elischer p->p_nice = nice; 765fa885116SJulian Elischer FOREACH_KSEGRP_IN_PROC(p, kg) { 766b43179fbSJeff Roberson resetpriority(kg); 767f5c157d9SJohn Baldwin FOREACH_THREAD_IN_GROUP(kg, td) { 768f5c157d9SJohn Baldwin resetpriority_thread(td, kg); 769f5c157d9SJohn Baldwin } 770b43179fbSJeff Roberson } 771fa885116SJulian Elischer } 772b43179fbSJeff Roberson 773f7f9e7f3SJeff Roberson void 774f7f9e7f3SJeff Roberson sched_class(struct ksegrp *kg, int class) 775f7f9e7f3SJeff Roberson { 7762056d0a1SJohn Baldwin mtx_assert(&sched_lock, MA_OWNED); 777f7f9e7f3SJeff Roberson kg->kg_pri_class = class; 778f7f9e7f3SJeff Roberson } 779f7f9e7f3SJeff Roberson 7801f955e2dSJulian Elischer /* 7811f955e2dSJulian Elischer * Adjust the priority of a thread. 

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
static void
sched_priority(struct thread *td, u_char prio)
{
	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority == prio)
		return;
	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_ksegrp->kg_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}
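
/*
 * Illustrative lend/unlend sequence (the priorities here are made up): a
 * timeshare thread at priority 190 holding a contested lock may be lent
 * priority 80 via sched_lend_prio(), which sets TDF_BORROWING.  When the
 * last waiter leaves, the turnstile calls sched_unlend_prio() with the
 * minimum priority still required (or PRI_MAX if none); since that is >=
 * the thread's base priority, TDF_BORROWING is cleared and sched_prio()
 * restores 190.
 */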

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct ksegrp *kg, u_char prio)
{
	struct thread *td;
	u_char oldprio;

	kg->kg_base_user_pri = prio;

	/* XXXKSE only for 1:1 */

	td = TAILQ_FIRST(&kg->kg_threads);
	if (td == NULL) {
		kg->kg_user_pri = prio;
		return;
	}

	if (td->td_flags & TDF_UBORROWING && kg->kg_user_pri <= prio)
		return;

	oldprio = kg->kg_user_pri;
	kg->kg_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	td->td_flags |= TDF_UBORROWING;

	oldprio = td->td_ksegrp->kg_user_pri;
	td->td_ksegrp->kg_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	struct ksegrp *kg = td->td_ksegrp;
	u_char base_pri;

	base_pri = kg->kg_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(kg, base_pri);
	} else
		sched_lend_user_prio(td, prio);
}

void
sched_sleep(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	td->td_ksegrp->kg_slptime = 0;
}

static void remrunqueue(struct thread *td);

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct proc *p;

	ke = td->td_kse;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);

	if ((p->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	/*
	 * We are volunteering to switch out so we get to nominate
	 * a successor for the rest of our quantum.
	 * First try another thread in our ksegrp, and then look for
	 * other ksegrps in our process.
	 */
	if (sched_followon &&
	    (p->p_flag & P_HADTHREADS) &&
	    (flags & SW_VOL) &&
	    newtd == NULL) {
		/* Let's schedule another thread from this process. */
		kg = td->td_ksegrp;
		if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
			remrunqueue(newtd);
			sched_kgfollowons++;
		} else {
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
					sched_pfollowons++;
					remrunqueue(newtd);
					break;
				}
			}
		}
	}

	if (newtd)
		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);

	td->td_lastcpu = td->td_oncpu;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or any thing else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td == PCPU_GET(idlethread))
		TD_SET_CAN_RUN(td);
	else {
		SLOT_RELEASE(td->td_ksegrp);
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue (kse and all). */
			setrunqueue(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		} else if (p->p_flag & P_HADTHREADS) {
			/*
			 * We will not be on the run queue.  So we must be
			 * sleeping or similar.  As it's available,
			 * someone else can use the KSE if they need it.
			 * It's NOT available if we are about to need it.
			 */
			if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
				slot_fill(td->td_ksegrp);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
		    ("trying to run inhibited thread"));
		SLOT_USE(newtd->td_ksegrp);
		newtd->td_kse->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
	}

	if (td != newtd) {
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		cpu_switch(td, newtd);
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	if (kg->kg_slptime > 1) {
		updatepri(kg);
		resetpriority(kg);
	}
	kg->kg_slptime = 0;
	setrunqueue(td, SRQ_BORING);
}

#ifdef SMP
/* enable HTT_2 if you have a 2-way HTT cpu.*/
static int
forward_wakeup(int cpunum)
{
	cpumask_t map, me, dontuse;
	cpumask_t map2;
	struct pcpu *pc;
	cpumask_t id, map3;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	    (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || cold || panicstr)
		return (0);

	forward_wakeups_requested++;

	/*
	 * Check the idle mask we received against what we calculated
	 * before in the old version.
	 */
	me = PCPU_GET(cpumask);
	/*
	 * Don't bother if we should be doing it ourselves.
	 */
	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
		return (0);

	dontuse = me | stopped_cpus | hlt_cpus_mask;
	map3 = 0;
	if (forward_wakeup_use_loop) {
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((id & dontuse) == 0 &&
			    pc->pc_curthread == pc->pc_idlethread) {
				map3 |= id;
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = 0;
		map = idle_cpus_mask & ~dontuse;

		/* If they are both on, compare and use loop if different */
		if (forward_wakeup_use_loop) {
			if (map != map3) {
				printf("map (%02X) != map3 (%02X)\n",
				    map, map3);
				map = map3;
			}
		}
	} else {
		map = map3;
	}
	/* If we only allow a specific CPU, then mask off all the others */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus),
		    ("forward_wakeup: bad cpunum."));
		map &= (1 << cpunum);
	} else {
		/* Try to choose an idle die. */
		if (forward_wakeup_use_htt) {
			map2 = (map & (map >> 1)) & 0x5555;
			if (map2) {
				map = map2;
			}
		}

		/* Set only one bit (isolate the lowest set bit). */
		if (forward_wakeup_use_single) {
			map = map & ((~map) + 1);
		}
	}
	if (map) {
		forward_wakeups_delivered++;
		ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}
#endif
#ifdef SMP
static void kick_other_cpu(int pri, int cpuid);

static void
kick_other_cpu(int pri, int cpuid)
{
	struct pcpu *pcpu = pcpu_find(cpuid);
	int cpri = pcpu->pc_curthread->td_priority;

	if (idle_cpus_mask & pcpu->pc_cpumask) {
		forward_wakeups_delivered++;
		ipi_selected(pcpu->pc_cpumask, IPI_AST);
		return;
	}

	if (pri >= cpri)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (pri <= PRI_MAX_ITHD)
#endif /* !FULL_PREEMPTION */
	{
		ipi_selected(pcpu->pc_cpumask, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
	ipi_selected(pcpu->pc_cpumask, IPI_AST);
	return;
}
#endif /* SMP */
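
/*
 * Editor's note: an illustrative summary (not part of the original file)
 * of the decision ladder in kick_other_cpu() above.  BSD priorities are
 * inverted, so a numerically smaller 'pri' is more important.  The CPU
 * number below is hypothetical.
 */
#if 0
static void
kick_other_cpu_example(void)
{
	/*
	 * The outcome depends on the remote CPU's state, tried in order:
	 * 1. CPU 1 idle: IPI_AST only; its idle loop picks up the work.
	 * 2. pri < cpri with IPI_PREEMPTION and PREEMPTION compiled in:
	 *    IPI_PREEMPT forces an immediate remote switch (restricted to
	 *    pri <= PRI_MAX_ITHD unless FULL_PREEMPTION is set).
	 * 3. Otherwise: mark the remote curthread TDF_NEEDRESCHED and send
	 *    IPI_AST, deferring the switch to its next reschedule point.
	 */
	kick_other_cpu(PRI_MAX_ITHD, 1);
}
#endif
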
void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
	struct kse *ke;
	int forwarded = 0;
	int cpu;
	int single_cpu = 0;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);

	if (td->td_pinned != 0) {
		cpu = td->td_lastcpu;
		ke->ke_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
	} else if ((ke)->ke_flags & KEF_BOUND) {
		/* Find CPU from bound runq. */
		KASSERT(SKE_RUNQ_PCPU(ke),
		    ("sched_add: bound kse not on cpu runq"));
		cpu = ke->ke_runq - &runq_pcpu[0];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
	} else {
		CTR2(KTR_RUNQ,
		    "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
		cpu = NOCPU;
		ke->ke_runq = &runq;
	}

	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
		kick_other_cpu(td->td_priority, cpu);
	} else {
		if (!single_cpu) {
			cpumask_t me = PCPU_GET(cpumask);
			int idle = idle_cpus_mask & me;

			if (!idle && ((flags & SRQ_INTR) == 0) &&
			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
				forwarded = forward_wakeup(cpu);
		}

		if (!forwarded) {
			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
				return;
			else
				maybe_resched(td);
		}
	}

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	SLOT_USE(td->td_ksegrp);
	runq_add(ke->ke_runq, ke, flags);
	ke->ke_state = KES_ONRUNQ;
}
#else /* SMP */
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
	ke->ke_runq = &runq;

	/*
	 * If we are yielding (on the way out anyhow) or the thread
	 * being saved is us, then don't try to be smart about preemption
	 * or kicking off another CPU, as it won't help and may hinder.
	 * In the YIELDING case we are about to run whoever is being put
	 * in the queue anyhow, and in the OURSELF case we are putting
	 * ourselves on the run queue, which also only happens when we
	 * are about to yield.
	 */
	if ((flags & SRQ_YIELDING) == 0) {
		if (maybe_preempt(td))
			return;
	}
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	SLOT_USE(td->td_ksegrp);
	runq_add(ke->ke_runq, ke, flags);
	ke->ke_state = KES_ONRUNQ;
	maybe_resched(td);
}
#endif /* SMP */
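
/*
 * Editor's note: an illustrative recap (not part of the original file) of
 * the SRQ_* hints seen in sched_add() above and in sched_wakeup() earlier.
 * The three calls below are alternative call sites, not a sequence.
 */
#if 0
static void
srq_flags_example(struct thread *td)
{
	/* Plain wakeup: sched_add() may preempt or forward to idle CPUs. */
	setrunqueue(td, SRQ_BORING);

	/* Queued from interrupt context: forward_wakeup() is skipped. */
	setrunqueue(td, SRQ_INTR);

	/* Caller is about to switch anyway: maybe_preempt() is skipped. */
	setrunqueue(td, SRQ_YIELDING);
}
#endif
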
void
sched_rem(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_rem: process swapped out"));
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);
	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	SLOT_RELEASE(td->td_ksegrp);
	runq_remove(ke->ke_runq, ke);

	ke->ke_state = KES_THREAD;
}

/*
 * Select threads to run.
 * Notice that the running threads still consume a slot.
 */
struct kse *
sched_choose(void)
{
	struct kse *ke;
	struct runq *rq;

#ifdef SMP
	struct kse *kecpu;

	rq = &runq;
	ke = runq_choose(&runq);
	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

	if (ke == NULL ||
	    (kecpu != NULL &&
	    kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
		CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
		    PCPU_GET(cpuid));
		ke = kecpu;
		rq = &runq_pcpu[PCPU_GET(cpuid)];
	} else {
		CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
	}

#else
	rq = &runq;
	ke = runq_choose(&runq);
#endif

	if (ke != NULL) {
		runq_remove(rq, ke);
		ke->ke_state = KES_THREAD;

		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("sched_choose: process swapped out"));
	}
	return (ke);
}
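
/*
 * Editor's note: a hypothetical helper (not part of the original file)
 * mirroring the SMP tie-break in sched_choose() above.  BSD priorities
 * are inverted, so a numerically smaller td_priority wins; the per-CPU
 * queue is taken only when strictly better, so ties favor the global
 * queue.
 */
#if 0
static struct kse *
choose_example(struct kse *gbl, struct kse *pcpu_ke)
{
	if (gbl == NULL || (pcpu_ke != NULL &&
	    pcpu_ke->ke_thread->td_priority < gbl->ke_thread->td_priority))
		return (pcpu_ke);	/* strictly better, or only option */
	return (gbl);			/* ties go to the global queue */
}
#endif
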
void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		td->td_base_pri = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("sched_bind: cannot bind non-running thread"));

	ke = td->td_kse;

	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	ke->ke_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

	ke->ke_state = KES_THREAD;

	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	return (td->td_kse->ke_flags & KEF_BOUND);
}
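
/*
 * Editor's note: a minimal usage sketch (not part of the original file)
 * for sched_bind()/sched_unbind() above, assuming the caller is
 * curthread.  On SMP, sched_bind() migrates via mi_switch() when the
 * caller is not already on the target CPU, so it returns on 'cpu'.
 */
#if 0
static void
bound_work_example(int cpu)
{
	mtx_lock_spin(&sched_lock);
	sched_bind(curthread, cpu);	/* may switch CPUs via mi_switch() */
	mtx_unlock_spin(&sched_lock);

	/* ... per-CPU work; sched_add() will honor KEF_BOUND ... */

	mtx_lock_spin(&sched_lock);
	sched_unbind(curthread);
	mtx_unlock_spin(&sched_lock);
}
#endif
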
void
sched_relinquish(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;
	mtx_lock_spin(&sched_lock);
	if (kg->kg_pri_class == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL, NULL);
	mtx_unlock_spin(&sched_lock);
}

int
sched_load(void)
{
	return (sched_tdcnt);
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct kse));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	return (ke->ke_pctcpu);
}
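
/*
 * Editor's note: an illustrative conversion (not part of the original
 * file) for the fixpt_t returned by sched_pctcpu() above, assuming the
 * classic BSD fixed-point scale (FSHIFT/FSCALE from <sys/param.h>).
 * Consumers such as ps(1)'s %CPU perform this kind of scaling.
 */
#if 0
static int
pctcpu_to_percent_example(struct thread *td)
{
	fixpt_t pct;

	pct = sched_pctcpu(td);
	return ((pct * 100) >> FSHIFT);	/* integer percent, 0..100 */
}
#endif
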
void
sched_tick(void)
{
}

#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"