/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callout to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
#endif

/*
 * These data structures are allocated within their parent data structure but
 * are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower(better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

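/*
 * Worked example of the priority mapping above (illustrative only; assumes
 * the stock priority layout where PRI_MIN_TIMESHARE is 160 and
 * PRI_MAX_TIMESHARE is 223, so SCHED_PRI_RANGE == 64, with
 * SCHED_INTERACT_MAX == 100):
 *
 *	score 0   -> SCHED_PRI_INTERACT() == 0,  user pri 160 + nice
 *	score 50  -> SCHED_PRI_INTERACT() == 32, user pri 192 + nice
 *	score 100 -> SCHED_PRI_INTERACT() == 64, user pri 224 + nice,
 *		     which sched_priority() clamps back to PRI_MAX_TIMESHARE.
 */
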
/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 2) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(100)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(20)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determine the amount of slice granted to a scaled nice.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))

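/*
 * Worked slice example (illustrative only; assumes hz == 100, so that
 * sched_setup() picks slice_min == 1 tick and slice_max == 14 ticks, giving
 * SCHED_SLICE_RANGE == 14 and SCHED_PRI_NTHRESH == 19):
 *
 *	A kseg at the least nice value on the queue (scaled nice 0) gets
 *	SCHED_SLICE_NICE(0)  == 14 ticks.
 *	A kseg 10 nice points above the least nice kseg gets
 *	SCHED_SLICE_NICE(10) == 14 - (10 * 14) / 19 == 7 ticks.
 *	A kseg more than SCHED_PRI_NTHRESH points above the least nice kseg
 *	gets no slice at all; see sched_slice().
 */
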
/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int		ksq_cpus;	/* Count of CPUs in this kseq. */
	unsigned int	ksq_rslices;	/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
struct kseq	*kseq_idmap[MAXCPU];
#define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(kseq_idmap[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
struct kseq * kseq_load_highest(void);
void kseq_balance(void *arg);
void kseq_move(struct kseq *from, int cpu);
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

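/*
 * Example of the nice bookkeeping above (illustrative only): ksq_nice[]
 * counts timeshare KSEs per nice value, offset by SCHED_PRI_NHALF so that
 * nice -20..20 maps to bins 0..40.  With one kse at nice -5 and two at
 * nice 0, ksq_nicemin is -5; removing the nice -5 kse empties its bin, so
 * kseq_nice_rem() rescans upward from that bin and resets ksq_nicemin to 0.
 * sched_slice() scales each kseg's slice against ksq_nicemin.
 */
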
#ifdef SMP
/*
 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm favors
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
void
kseq_balance(void *arg)
{
	struct kseq *kseq;
	int high_load;
	int low_load;
	int high_cpu;
	int low_cpu;
	int move;
	int diff;
	int i;

	high_cpu = 0;
	low_cpu = 0;
	high_load = 0;
	low_load = -1;

	mtx_lock_spin(&sched_lock);
	if (smp_started == 0)
		goto out;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > high_load) {
			high_load = kseq->ksq_load;
			high_cpu = i;
		}
		if (low_load == -1 || kseq->ksq_load < low_load) {
			low_load = kseq->ksq_load;
			low_cpu = i;
		}
	}

	kseq = KSEQ_CPU(high_cpu);

	/*
	 * Nothing to do.
	 */
	if (high_load < kseq->ksq_cpus + 1)
		goto out;

	high_load -= kseq->ksq_cpus;

	if (low_load >= high_load)
		goto out;

	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;

	for (i = 0; i < move; i++)
		kseq_move(kseq, low_cpu);

out:
	mtx_unlock_spin(&sched_lock);
	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);

	return;
}

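/*
 * Balancing example for kseq_balance() above (illustrative only, one cpu
 * per kseq so ksq_cpus == 1): if the busiest kseq has ksq_load == 7 and the
 * idlest has ksq_load == 2, the high load is first reduced by ksq_cpus to 6,
 * diff is 4 and move is 2, so two KSEs are migrated to the low cpu, leaving
 * loads of roughly 5 and 4.
 */
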
struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	kseq = KSEQ_CPU(cpu);

	if (load > kseq->ksq_cpus)
		return (kseq);

	return (NULL);
}

void
kseq_move(struct kseq *from, int cpu)
{
	struct kse *ke;

	ke = kseq_choose(from);
	runq_remove(ke->ke_runq, ke);
	ke->ke_state = KES_THREAD;
	kseq_rem(from, ke);

	ke->ke_cpu = cpu;
	sched_add(ke);
}
#endif

struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
	kseq->ksq_load = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	/* init kseqs */
	/* Create the idmap. */
#ifdef ULE_HTT_EXPERIMENTAL
	if (smp_topology == NULL) {
#else
	if (1) {
#endif
		for (i = 0; i < MAXCPU; i++) {
			kseq_setup(&kseq_cpu[i]);
			kseq_idmap[i] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = 1;
		}
	} else {
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			struct cpu_group *cg;

			cg = &smp_topology->ct_group[i];
			kseq_setup(&kseq_cpu[i]);

			for (j = 0; j < MAXCPU; j++)
				if ((cg->cg_mask & (1 << j)) != 0)
					kseq_idmap[j] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = cg->cg_count;
		}
	}
	callout_init(&kseq_lb_callout, 1);
	kseq_balance(NULL);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
	 *
	 */

	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	sched_interact_update(kg);

	return;
}

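/*
 * Decay example for sched_interact_update() below (illustrative only,
 * assumes hz == 100 so SCHED_SLP_RUN_MAX == 200 << 10): a kseg with
 * kg_runtime == 180 << 10 and kg_slptime == 40 << 10 exceeds the cap, so
 * both are scaled by 4/5 to 144 << 10 and 32 << 10.  The sum is back under
 * the cap and the run/sleep ratio is preserved, which keeps the
 * interactivity score stable while old history is forgotten.
 */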
static void
sched_interact_update(struct ksegrp *kg)
{
	/* XXX Fixme, use a linear algorithm and not a while loop. */
	while ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		kg->kg_runtime = (kg->kg_runtime / 5) * 4;
		kg->kg_slptime = (kg->kg_slptime / 5) * 4;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}

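/*
 * Score examples for sched_interact_score() above (illustrative only):
 * SCHED_INTERACT_HALF is 50, so a kseg that sleeps three times as long as
 * it runs (kg_slptime == 3 * kg_runtime) scores roughly
 * 50 * kg_runtime / kg_slptime == 16, under SCHED_INTERACT_THRESH (20), and
 * is treated as interactive.  A kseg that runs three times as long as it
 * sleeps scores roughly 50 + (50 - 50 * kg_slptime / kg_runtime) == 84 and
 * is not.
 */
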
/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */

	/*
	 * Shift the tick count out so that the divide doesn't round away
	 * our results.
	 */
	ke->ke_ticks <<= 10;
	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
	ke->ke_ticks >>= 10;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

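/*
 * Rescaling example for sched_pctcpu_update() above (illustrative only,
 * assumes hz == 100 so SCHED_CPU_TICKS == 1000): a kse that accumulated 250
 * ticks over a 1250 tick window (ke_ltick - ke_ftick) is rescaled to
 * 250 * 1000 / 1250 == 200 ticks over a fresh 1000 tick window, so
 * sched_pctcpu() will report roughly 20% of a cpu.
 */
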
#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (!smp_started)
		return (0);

	load = 0;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	td->td_priority = prio;

	if (TD_ON_RUNQ(td)) {
		rq = ke->ke_runq;

		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		/*
		 * This queue is always correct except for idle threads which
		 * have a higher priority due to priority propagation.
		 */
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
		    ke->ke_thread->td_priority > PRI_MIN_IDLE)
			ke->ke_runq = KSEQ_SELF()->ksq_curr;
		runq_add(ke->ke_runq, ke);
		/* setrunqueue(td); */
		return;
	}
	if (ke->ke_runq)
		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
	/*
	 * We will not be on the run queue.  So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_SA)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_runq == NULL)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_interact_update(kg);
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/*
	 * Claim that we've been running for one second for statistical
	 * purposes.
	 */
	child->ke_ticks = 0;
	child->ke_ltick = ticks;
	child->ke_ftick = ticks - hz;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
	/* XXX Need something better here */

	child->kg_slptime = kg->kg_slptime / SCHED_SLP_RUN_THROTTLE;
	child->kg_runtime = kg->kg_runtime / SCHED_SLP_RUN_THROTTLE;
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* kg->kg_slptime += child->kg_slptime; */
	kg->kg_runtime += child->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct thread *td;
#if 0
	struct kse *nke;
#endif

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	td = ke->ke_thread;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);

	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * Check for a higher priority task on the run queue.  This can happen
	 * on SMP if another processor woke up a process on our runq.
	 */
	kseq = KSEQ_SELF();
#if 0
	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
		if (sched_strict &&
		    nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
		else if (nke->ke_thread->td_priority <
		    td->td_priority SCHED_PRIO_SLOP)

		if (nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
	}
#endif
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	mtx_lock_spin(&sched_lock);
	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		goto out;
#ifdef SMP
	/*
	 * For SMP we may steal other processors' KSEs.  Just search until we
	 * verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load > kseq->ksq_cpus)
				goto out;
		}
	}
#endif
	load = 0;
out:
	mtx_unlock_spin(&sched_lock);
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;
	struct kseq *kseq;
	struct kse *ke;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		kseq = KSEQ_SELF();
		if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
#ifdef SMP
		    kseq->ksq_load > kseq->ksq_cpus &&
#else
		    kseq->ksq_load > 1 &&
#endif
		    (ke = kseq_choose(kseq)) != NULL &&
		    ke->ke_thread->td_priority < td->td_priority)
			curthread->td_flags |= TDF_NEEDRESCHED;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
retry:
#endif
	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}

#ifdef SMP
	if (smp_started) {
		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		if ((kseq = kseq_load_highest()) == NULL)
			return (NULL);

		/*
		 * Remove this kse from this kseq and runq and then requeue
		 * on the current processor.  Then we will dequeue it
		 * normally above.
		 */
		kseq_move(kseq, PCPU_GET(cpuid));
		goto retry;
	}
#endif

	return (NULL);
}
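/*
 * Place a runnable KSE on the run queue that matches its priority class:
 * interrupt and realtime KSEs always go on this cpu's current queue,
 * timeshare KSEs go on the current or next queue depending on the
 * SCHED_CURR() check, and idle-class KSEs go on the idle queue unless
 * priority propagation has raised them out of the idle priority range.
 */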
void
sched_add(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	kg = ke->ke_ksegrp;

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		/*
		 * This is for priority propagation.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct kse *ke)
{
	struct kseq *kseq;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}
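/*
 * Return a fixed-point (FSCALE-based) estimate of this KSE's recent cpu
 * usage, rebuilt from the tick counts kept in ke_ticks.
 *
 * Illustrative arithmetic (assumed values, not taken from this file):
 * with FSHIFT == 11 (so FSCALE == 2048), realstathz == 128 and an
 * average of rtick == 64 stathz ticks per second, the expression below
 * yields (2048 * ((2048 * 64) / 128)) >> 11 == 1024 == FSCALE / 2,
 * i.e. roughly 50% cpu.
 */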
fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);

		/* How many rticks per second? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick) / realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}