/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define KTR_ULE         KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;
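/*
 * Usage note (illustrative): the slice bounds above are exported as
 * read/write sysctls under kern.sched, so they can be inspected and tuned
 * on a running system, e.g.:
 *
 *	# sysctl kern.sched.slice_min kern.sched.slice_max
 *
 * Note that sched_setup() overwrites the initial values with hz-derived
 * defaults at boot, so only post-boot tuning is meaningful.
 */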
#ifdef SMP
/* Callout to handle load balancing on SMP systems. */
static struct callout kseq_lb_callout;
#endif

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
        int             ske_slice;
        struct runq     *ske_runq;
        /* The following variables are only used for pctcpu calculation */
        int             ske_ltick;      /* Last tick that we were running on */
        int             ske_ftick;      /* First tick that we were running on */
        int             ske_ticks;      /* Tick count */
        /* CPU that we have affinity for. */
        u_char          ske_cpu;
};
#define ke_slice        ke_sched->ske_slice
#define ke_runq         ke_sched->ske_runq
#define ke_ltick        ke_sched->ske_ltick
#define ke_ftick        ke_sched->ske_ftick
#define ke_ticks        ke_sched->ske_ticks
#define ke_cpu          ke_sched->ske_cpu

struct kg_sched {
        int     skg_slptime;    /* Number of ticks we vol. slept */
        int     skg_runtime;    /* Number of ticks we were running */
};
#define kg_slptime      kg_sched->skg_slptime
#define kg_runtime      kg_sched->skg_runtime

struct td_sched {
        int     std_slptime;
};
#define td_slptime      td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define SCHED_PRI_RANGE         (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define SCHED_PRI_NRESV         PRIO_TOTAL
#define SCHED_PRI_NHALF         (PRIO_TOTAL / 2)
#define SCHED_PRI_NTHRESH       (SCHED_PRI_NHALF - 1)
#define SCHED_PRI_BASE          (PRI_MIN_TIMESHARE)
#define SCHED_PRI_INTERACT(score)                                       \
        ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
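/*
 * Worked example of the mapping above (illustrative; assumes the 5.x-era
 * values PRI_MIN_TIMESHARE = 160 and PRI_MAX_TIMESHARE = 223 from
 * <sys/priority.h>, and SCHED_INTERACT_MAX = 100 as defined below):
 *
 *	SCHED_PRI_RANGE = 223 - 160 + 1 = 64
 *
 * A kse group with an interactivity score of 25 and nice 0 computes to
 *
 *	pri = SCHED_PRI_INTERACT(25) + SCHED_PRI_BASE + 0
 *	    = (25 * 64 / 100) + 160 = 176
 *
 * while a fully CPU-bound group (score 100) computes to 224 and is
 * clamped back to PRI_MAX_TIMESHARE by sched_priority().
 */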
/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define SCHED_SLP_RUN_MAX       ((hz * 2) << 10)
#define SCHED_SLP_RUN_THROTTLE  (100)
#define SCHED_INTERACT_MAX      (100)
#define SCHED_INTERACT_HALF     (SCHED_INTERACT_MAX / 2)
#define SCHED_INTERACT_THRESH   (20)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 */
#define SCHED_SLICE_MIN         (slice_min)
#define SCHED_SLICE_MAX         (slice_max)
#define SCHED_SLICE_RANGE       (SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define SCHED_SLICE_SCALE(val, max)     (((val) * SCHED_SLICE_RANGE) / (max))
#define SCHED_SLICE_NICE(nice)                                          \
        (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))
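/*
 * Worked example of the slice arithmetic above (illustrative; assumes
 * hz = 1000, so sched_setup() sets slice_min = 10 and slice_max = 142,
 * and the standard nice range of -20..20, so SCHED_PRI_NTHRESH = 19):
 *
 *	SCHED_SLICE_RANGE = 142 - 10 + 1 = 133
 *
 * For a non-interactive kse group whose nice is 10 steps away from the
 * least nice group on its run queue:
 *
 *	SCHED_SLICE_NICE(10) = 142 - (10 * 133 / 19) = 142 - 70 = 72 ticks
 *
 * At distance 0 the group receives the full 142 ticks; beyond
 * SCHED_PRI_NTHRESH (19) sched_slice() assigns a slice of 0.
 */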
/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define SCHED_INTERACTIVE(kg)                                           \
        (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define SCHED_CURR(kg, ke)                                              \
        (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define SCHED_CPU_TIME  10
#define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define KSEQ_NCLASS     (PRI_IDLE + 1)  /* Number of run classes. */

struct kseq {
        struct runq     ksq_idle;               /* Queue of IDLE threads. */
        struct runq     ksq_timeshare[2];       /* Run queues for !IDLE. */
        struct runq     *ksq_next;              /* Next timeshare queue. */
        struct runq     *ksq_curr;              /* Current queue. */
        int             ksq_loads[KSEQ_NCLASS]; /* Load for each class */
        int             ksq_load;               /* Aggregate load. */
        short           ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
        short           ksq_nicemin;            /* Least nice. */
#ifdef SMP
        unsigned int    ksq_rslices;    /* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq     kseq_cpu[MAXCPU];
#define KSEQ_SELF()     (&kseq_cpu[PCPU_GET(cpuid)])
#define KSEQ_CPU(x)     (&kseq_cpu[(x)])
#else
struct kseq     kseq_cpu;
#define KSEQ_SELF()     (&kseq_cpu)
#define KSEQ_CPU(x)     (&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse *kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
struct kseq *kseq_load_highest(void);
void kseq_balance(void *arg);
void kseq_move(struct kseq *from, int cpu);
#endif

void
kseq_print(int cpu)
{
        struct kseq *kseq;
        int i;

        kseq = KSEQ_CPU(cpu);

        printf("kseq:\n");
        printf("\tload: %d\n", kseq->ksq_load);
        printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
        printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
        printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
        printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
        printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
        printf("\tnice counts:\n");
        for (i = 0; i < PRIO_TOTAL + 1; i++)
                if (kseq->ksq_nice[i])
                        printf("\t\t%d = %d\n",
                            i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}
static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
        mtx_assert(&sched_lock, MA_OWNED);
        kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
        kseq->ksq_load++;
        if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
                CTR6(KTR_ULE,
                    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
                    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
                    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
        if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
                kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
        kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
        mtx_assert(&sched_lock, MA_OWNED);
        kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
        kseq->ksq_load--;
        ke->ke_runq = NULL;
        if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
                kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
        kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
        mtx_assert(&sched_lock, MA_OWNED);
        /* Normalize to zero. */
        kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
        if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
                kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
        int n;

        mtx_assert(&sched_lock, MA_OWNED);
        /* Normalize to zero. */
        n = nice + SCHED_PRI_NHALF;
        kseq->ksq_nice[n]--;
        KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

        /*
         * If this wasn't the smallest nice value or there are more in
         * this bucket we can just return.  Otherwise we have to recalculate
         * the smallest nice.
         */
        if (nice != kseq->ksq_nicemin ||
            kseq->ksq_nice[n] != 0 ||
            kseq->ksq_loads[PRI_TIMESHARE] == 0)
                return;

        for (; n < SCHED_PRI_NRESV + 1; n++)
                if (kseq->ksq_nice[n]) {
                        kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
                        return;
                }
}
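/*
 * Worked example of the nice bins above (illustrative; assumes the
 * standard nice range of -20..20, i.e. PRIO_TOTAL = 40 and
 * SCHED_PRI_NHALF = 20): kseq_nice_add(kseq, -20) increments ksq_nice[0],
 * nice 0 lands in ksq_nice[20], and nice 20 in ksq_nice[40].  When the
 * last kse at the minimum nice leaves, kseq_nice_rem() scans upward from
 * that bin; if the next occupied bin is, say, index 25, the new
 * ksq_nicemin becomes 25 - 20 = 5.
 */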
#ifdef SMP
/*
 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions, so this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 */
void
kseq_balance(void *arg)
{
        struct kseq *kseq;
        int high_load;
        int low_load;
        int high_cpu;
        int low_cpu;
        int move;
        int diff;
        int i;

        high_cpu = 0;
        low_cpu = 0;
        high_load = 0;
        low_load = -1;

        mtx_lock_spin(&sched_lock);
        if (smp_started == 0)
                goto out;

        for (i = 0; i < mp_maxid; i++) {
                if (CPU_ABSENT(i))
                        continue;
                kseq = KSEQ_CPU(i);
                if (kseq->ksq_load > high_load) {
                        high_load = kseq->ksq_load;
                        high_cpu = i;
                }
                if (low_load == -1 || kseq->ksq_load < low_load) {
                        low_load = kseq->ksq_load;
                        low_cpu = i;
                }
        }

        /*
         * Nothing to do.
         */
        if (high_load < 2 || low_load == high_load)
                goto out;

        diff = high_load - low_load;
        move = diff / 2;
        if (diff & 0x1)
                move++;

        for (i = 0; i < move; i++)
                kseq_move(KSEQ_CPU(high_cpu), low_cpu);

out:
        mtx_unlock_spin(&sched_lock);
        callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);

        return;
}

struct kseq *
kseq_load_highest(void)
{
        struct kseq *kseq;
        int load;
        int cpu;
        int i;

        mtx_assert(&sched_lock, MA_OWNED);
        cpu = 0;
        load = 0;

        for (i = 0; i < mp_maxid; i++) {
                if (CPU_ABSENT(i))
                        continue;
                kseq = KSEQ_CPU(i);
                if (kseq->ksq_load > load) {
                        load = kseq->ksq_load;
                        cpu = i;
                }
        }
        if (load > 1)
                return (KSEQ_CPU(cpu));

        return (NULL);
}

void
kseq_move(struct kseq *from, int cpu)
{
        struct kse *ke;

        ke = kseq_choose(from);
        runq_remove(ke->ke_runq, ke);
        ke->ke_state = KES_THREAD;
        kseq_rem(from, ke);

        ke->ke_cpu = cpu;
        sched_add(ke);
}
#endif
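/*
 * Worked example of the balancing arithmetic in kseq_balance() above
 * (illustrative): with high_load = 5 and low_load = 2, diff = 3 and
 * move = diff / 2 + 1 = 2 because diff is odd, so two kses migrate and
 * the loads settle at 3 and 4.  The callout then re-runs the balancer
 * roughly once per second (every hz ticks).
 */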
struct kse *
kseq_choose(struct kseq *kseq)
{
        struct kse *ke;
        struct runq *swap;

        mtx_assert(&sched_lock, MA_OWNED);
        swap = NULL;

        for (;;) {
                ke = runq_choose(kseq->ksq_curr);
                if (ke == NULL) {
                        /*
                         * We already swapped once and didn't get anywhere.
                         */
                        if (swap)
                                break;
                        swap = kseq->ksq_curr;
                        kseq->ksq_curr = kseq->ksq_next;
                        kseq->ksq_next = swap;
                        continue;
                }
                /*
                 * If we encounter a slice of 0 the kse is in a
                 * TIMESHARE kse group and its nice was too far out
                 * of the range that receives slices.
                 */
                if (ke->ke_slice == 0) {
                        runq_remove(ke->ke_runq, ke);
                        sched_slice(ke);
                        ke->ke_runq = kseq->ksq_next;
                        runq_add(ke->ke_runq, ke);
                        continue;
                }
                return (ke);
        }

        return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
        runq_init(&kseq->ksq_timeshare[0]);
        runq_init(&kseq->ksq_timeshare[1]);
        runq_init(&kseq->ksq_idle);

        kseq->ksq_curr = &kseq->ksq_timeshare[0];
        kseq->ksq_next = &kseq->ksq_timeshare[1];

        kseq->ksq_loads[PRI_ITHD] = 0;
        kseq->ksq_loads[PRI_REALTIME] = 0;
        kseq->ksq_loads[PRI_TIMESHARE] = 0;
        kseq->ksq_loads[PRI_IDLE] = 0;
        kseq->ksq_load = 0;
#ifdef SMP
        kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
        int i;

        slice_min = (hz / 100);	/* 10ms */
        slice_max = (hz / 7);	/* ~140ms */

        mtx_lock_spin(&sched_lock);
        /* init kseqs */
        for (i = 0; i < MAXCPU; i++)
                kseq_setup(KSEQ_CPU(i));

        kseq_add(KSEQ_SELF(), &kse0);
        mtx_unlock_spin(&sched_lock);
#ifdef SMP
        callout_init(&kseq_lb_callout, 1);
        kseq_balance(NULL);
#endif
}
/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
        int pri;

        if (kg->kg_pri_class != PRI_TIMESHARE)
                return;

        pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
        pri += SCHED_PRI_BASE;
        pri += kg->kg_nice;

        if (pri > PRI_MAX_TIMESHARE)
                pri = PRI_MAX_TIMESHARE;
        else if (pri < PRI_MIN_TIMESHARE)
                pri = PRI_MIN_TIMESHARE;

        kg->kg_user_pri = pri;

        return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
        struct kseq *kseq;
        struct ksegrp *kg;

        kg = ke->ke_ksegrp;
        kseq = KSEQ_CPU(ke->ke_cpu);

        /*
         * Rationale:
         * KSEs in interactive ksegs get the minimum slice so that we
         * quickly notice if it abuses its advantage.
         *
         * KSEs in non-interactive ksegs are assigned a slice that is
         * based on the kseg's nice value relative to the least nice kseg
         * on the run queue for this cpu.
         *
         * If the KSE is less nice than all others it gets the maximum
         * slice and other KSEs will adjust their slice relative to
         * this when they first expire.
         *
         * There is a 20 point window that starts relative to the least
         * nice kse on the run queue.  Slice size is determined by
         * the kse's distance from the least nice ksegrp.
         *
         * If you are outside of the window you will get no slice and
         * you will be reevaluated each time you are selected on the
         * run queue.
         */
        if (!SCHED_INTERACTIVE(kg)) {
                int nice;

                nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
                if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
                    kg->kg_nice < kseq->ksq_nicemin)
                        ke->ke_slice = SCHED_SLICE_MAX;
                else if (nice <= SCHED_PRI_NTHRESH)
                        ke->ke_slice = SCHED_SLICE_NICE(nice);
                else
                        ke->ke_slice = 0;
        } else
                ke->ke_slice = SCHED_SLICE_MIN;

        CTR6(KTR_ULE,
            "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
            ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
            kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

        /*
         * Check to see if we need to scale back the slp and run time
         * in the kg.  This will cause us to forget old interactivity
         * while maintaining the current ratio.
         */
        sched_interact_update(kg);

        return;
}

static void
sched_interact_update(struct ksegrp *kg)
{
        /* XXX Fixme, use a linear algorithm and not a while loop. */
        while ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
                kg->kg_runtime = (kg->kg_runtime / 5) * 4;
                kg->kg_slptime = (kg->kg_slptime / 5) * 4;
        }
}

static int
sched_interact_score(struct ksegrp *kg)
{
        int div;

        if (kg->kg_runtime > kg->kg_slptime) {
                div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
                return (SCHED_INTERACT_HALF +
                    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
        }
        if (kg->kg_slptime > kg->kg_runtime) {
                div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
                return (kg->kg_runtime / div);
        }

        /*
         * This can happen if slptime and runtime are 0.
         */
        return (0);
}
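/*
 * Worked example of the two functions above (illustrative; values are in
 * the <<10 fixed-point units used for kg_slptime/kg_runtime, with
 * SCHED_INTERACT_HALF = 50):
 *
 * A group with kg_slptime = 30000 and kg_runtime = 10000 sleeps more than
 * it runs, so div = max(1, 30000 / 50) = 600 and its score is
 * 10000 / 600 = 16, below SCHED_INTERACT_THRESH (20) and thus
 * interactive.  With the values swapped the score becomes
 * 50 + (50 - 16) = 84: decidedly CPU-bound.
 *
 * For sched_interact_update() with hz = 1000, SCHED_SLP_RUN_MAX is
 * 2000 << 10 = 2048000.  A group at kg_runtime = 1500000 and
 * kg_slptime = 1000000 exceeds that sum, so one pass scales both by 4/5
 * to 1200000 and 800000, forgetting history while keeping the 3:2 ratio.
 */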
/*
 * This is only somewhat accurate since, given many processes of the same
 * priority, they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
        return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
        /*
         * Adjust counters and watermark for pctcpu calc.
         */

        /*
         * Shift the tick count out so that the divide doesn't round away
         * our results.
         */
        ke->ke_ticks <<= 10;
        ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
            SCHED_CPU_TICKS;
        ke->ke_ticks >>= 10;
        ke->ke_ltick = ticks;
        ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}
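/*
 * Worked example of the shift above (illustrative; assumes hz = 1000, so
 * SCHED_CPU_TICKS = 10000): take ke_ticks = 500 over a 15000 tick window
 * (ke_ltick - ke_ftick).  Without the shift, 500 / 15000 truncates to 0
 * and the thread's history is lost.  Shifted, (500 << 10) / 15000 = 34,
 * times 10000 is 340000, and shifting back down yields 332 ticks
 * normalized to the 10 second averaging window.
 */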
#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
        struct kseq *kseq;
        int load;
        int cpu;
        int i;

        mtx_assert(&sched_lock, MA_OWNED);
        if (!smp_started)
                return (0);

        load = -1;
        cpu = 0;

        for (i = 0; i < mp_maxid; i++) {
                if (CPU_ABSENT(i))
                        continue;
                kseq = KSEQ_CPU(i);
                if (load == -1 || kseq->ksq_load < load) {
                        cpu = i;
                        load = kseq->ksq_load;
                }
        }

        CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
        return (cpu);
}
#else
int
sched_pickcpu(void)
{
        return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
        struct kse *ke;
        struct runq *rq;

        mtx_assert(&sched_lock, MA_OWNED);
        ke = td->td_kse;
        td->td_priority = prio;

        if (TD_ON_RUNQ(td)) {
                rq = ke->ke_runq;

                runq_remove(rq, ke);
                runq_add(rq, ke);
        }
}

void
sched_switchout(struct thread *td)
{
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);

        ke = td->td_kse;

        td->td_last_kse = ke;
        td->td_lastcpu = td->td_oncpu;
        td->td_oncpu = NOCPU;
        td->td_flags &= ~TDF_NEEDRESCHED;

        if (TD_IS_RUNNING(td)) {
                /*
                 * This queue is always correct except for idle threads which
                 * have a higher priority due to priority propagation.
                 */
                if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
                    ke->ke_thread->td_priority > PRI_MIN_IDLE)
                        ke->ke_runq = KSEQ_SELF()->ksq_curr;
                runq_add(ke->ke_runq, ke);
                /* setrunqueue(td); */
                return;
        }
        if (ke->ke_runq)
                kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
        /*
         * We will not be on the run queue.  So we must be
         * sleeping or similar.
         */
        if (td->td_proc->p_flag & P_SA)
                kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
        /* struct kse *ke = td->td_kse; */
        mtx_assert(&sched_lock, MA_OWNED);

        td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct ksegrp *kg, int nice)
{
        struct kse *ke;
        struct thread *td;
        struct kseq *kseq;

        PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED);
        /*
         * We need to adjust the nice counts for running KSEs.
         */
        if (kg->kg_pri_class == PRI_TIMESHARE)
                FOREACH_KSE_IN_GROUP(kg, ke) {
                        if (ke->ke_runq == NULL)
                                continue;
                        kseq = KSEQ_CPU(ke->ke_cpu);
                        kseq_nice_rem(kseq, kg->kg_nice);
                        kseq_nice_add(kseq, nice);
                }
        kg->kg_nice = nice;
        sched_priority(kg);
        FOREACH_THREAD_IN_GROUP(kg, td)
                td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
        mtx_assert(&sched_lock, MA_OWNED);

        td->td_slptime = ticks;
        td->td_priority = prio;

        CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
            td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
        mtx_assert(&sched_lock, MA_OWNED);

        /*
         * Let the kseg know how long we slept for.  This is because process
         * interactivity behavior is modeled in the kseg.
         */
        if (td->td_slptime) {
                struct ksegrp *kg;
                int hzticks;

                kg = td->td_ksegrp;
                hzticks = ticks - td->td_slptime;
                kg->kg_slptime += hzticks << 10;
                sched_interact_update(kg);
                sched_priority(kg);
                if (td->td_kse)
                        sched_slice(td->td_kse);
                CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
                    td->td_kse, hzticks);
                td->td_slptime = 0;
        }
        setrunqueue(td);
        if (td->td_priority < curthread->td_priority)
                curthread->td_flags |= TDF_NEEDRESCHED;
}
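/*
 * Worked example of the wakeup accounting above (illustrative): a thread
 * that blocks at tick 1000 and wakes at tick 1250 slept for
 * hzticks = 250 ticks, so kg_slptime grows by 250 << 10 = 256000.  The
 * << 10 matches the fixed-point units used by sched_interact_score() and
 * keeps the subsequent divisions from rounding the sleep away.
 */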
/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

        mtx_assert(&sched_lock, MA_OWNED);

        sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
        sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
        sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

        child->ke_slice = 1;    /* Attempt to quickly learn interactivity. */
        child->ke_cpu = ke->ke_cpu;     /* sched_pickcpu(); */
        child->ke_runq = NULL;

        /*
         * Claim that we've been running for one second for statistical
         * purposes.
         */
        child->ke_ticks = 0;
        child->ke_ltick = ticks;
        child->ke_ftick = ticks - hz;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

        PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
        /* XXX Need something better here */

        child->kg_slptime = kg->kg_slptime / SCHED_SLP_RUN_THROTTLE;
        child->kg_runtime = kg->kg_runtime / SCHED_SLP_RUN_THROTTLE;
        kg->kg_runtime += tickincr << 10;
        sched_interact_update(kg);

        child->kg_user_pri = kg->kg_user_pri;
        child->kg_nice = kg->kg_nice;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}
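/*
 * Worked example of the fork throttle in sched_fork_ksegrp() above
 * (illustrative): with SCHED_SLP_RUN_THROTTLE = 100, a parent at
 * kg_slptime = 200000 and kg_runtime = 100000 hands the child only 2000
 * and 1000 respectively, so the child keeps the parent's sleep/run ratio
 * but must re-earn its interactivity.  The parent is also charged one
 * tick of run time (tickincr << 10) for the fork itself.
 */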
void
sched_class(struct ksegrp *kg, int class)
{
        struct kseq *kseq;
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);
        if (kg->kg_pri_class == class)
                return;

        FOREACH_KSE_IN_GROUP(kg, ke) {
                if (ke->ke_state != KES_ONRUNQ &&
                    ke->ke_state != KES_THREAD)
                        continue;
                kseq = KSEQ_CPU(ke->ke_cpu);

                kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
                kseq->ksq_loads[PRI_BASE(class)]++;

                if (kg->kg_pri_class == PRI_TIMESHARE)
                        kseq_nice_rem(kseq, kg->kg_nice);
                else if (class == PRI_TIMESHARE)
                        kseq_nice_add(kseq, kg->kg_nice);
        }

        kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
        /* XXX Need something better here */
        mtx_assert(&sched_lock, MA_OWNED);
        sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
        sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
        kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
        /* kg->kg_slptime += child->kg_slptime; */
        kg->kg_runtime += child->kg_runtime;
        sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct kse *ke)
{
        struct kseq *kseq;
        struct ksegrp *kg;
        struct thread *td;
#if 0
        struct kse *nke;
#endif

        /*
         * sched_setup() apparently happens prior to stathz being set.  We
         * need to resolve the timers earlier in the boot so we can avoid
         * calculating this here.
         */
        if (realstathz == 0) {
                realstathz = stathz ? stathz : hz;
                tickincr = hz / realstathz;
                /*
                 * XXX This does not work for values of stathz that are much
                 * larger than hz.
                 */
                if (tickincr == 0)
                        tickincr = 1;
        }

        td = ke->ke_thread;
        kg = ke->ke_ksegrp;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((td != NULL), ("schedclock: null thread pointer"));

        /* Adjust ticks for pctcpu */
        ke->ke_ticks++;
        ke->ke_ltick = ticks;

        /* Go up to one second beyond our max and then trim back down */
        if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
                sched_pctcpu_update(ke);

        if (td->td_flags & TDF_IDLETD)
                return;

        CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
            ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);

        /*
         * We only do slicing code for TIMESHARE ksegrps.
         */
        if (kg->kg_pri_class != PRI_TIMESHARE)
                return;
        /*
         * Check for a higher priority task on the run queue.  This can happen
         * on SMP if another processor woke up a process on our runq.
         */
        kseq = KSEQ_SELF();
#if 0
        if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
                if (sched_strict &&
                    nke->ke_thread->td_priority < td->td_priority)
                        td->td_flags |= TDF_NEEDRESCHED;
                else if (nke->ke_thread->td_priority <
                    td->td_priority + SCHED_PRIO_SLOP)
                        if (nke->ke_thread->td_priority < td->td_priority)
                                td->td_flags |= TDF_NEEDRESCHED;
        }
#endif
        /*
         * We used a tick; charge it to the ksegrp so that we can compute our
         * interactivity.
         */
        kg->kg_runtime += tickincr << 10;
        sched_interact_update(kg);

        /*
         * We used up one time slice.
         */
        ke->ke_slice--;
#ifdef SMP
        kseq->ksq_rslices--;
#endif

        if (ke->ke_slice > 0)
                return;
        /*
         * We're out of time; recompute priorities and requeue.
         */
        kseq_rem(kseq, ke);
        sched_priority(kg);
        sched_slice(ke);
        if (SCHED_CURR(kg, ke))
                ke->ke_runq = kseq->ksq_curr;
        else
                ke->ke_runq = kseq->ksq_next;
        kseq_add(kseq, ke);
        td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
        struct kseq *kseq;
        int load;

        load = 1;

        mtx_lock_spin(&sched_lock);
        kseq = KSEQ_SELF();

        if (kseq->ksq_load)
                goto out;
#ifdef SMP
        /*
         * For SMP we may steal other processors' KSEs.  Just search until we
         * verify that at least one other cpu has a runnable task.
         */
        if (smp_started) {
                int i;

                for (i = 0; i < mp_maxid; i++) {
                        if (CPU_ABSENT(i))
                                continue;
                        kseq = KSEQ_CPU(i);
                        if (kseq->ksq_load > 1)
                                goto out;
                }
        }
#endif
        load = 0;
out:
        mtx_unlock_spin(&sched_lock);
        return (load);
}

void
sched_userret(struct thread *td)
{
        struct ksegrp *kg;
        struct kseq *kseq;
        struct kse *ke;

        kg = td->td_ksegrp;

        if (td->td_priority != kg->kg_user_pri) {
                mtx_lock_spin(&sched_lock);
                td->td_priority = kg->kg_user_pri;
                kseq = KSEQ_SELF();
                if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
                    kseq->ksq_load > 1 &&
                    (ke = kseq_choose(kseq)) != NULL &&
                    ke->ke_thread->td_priority < td->td_priority)
                        curthread->td_flags |= TDF_NEEDRESCHED;
                mtx_unlock_spin(&sched_lock);
        }
}

struct kse *
sched_choose(void)
{
        struct kseq *kseq;
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
retry:
#endif
        kseq = KSEQ_SELF();
        ke = kseq_choose(kseq);
        if (ke) {
                runq_remove(ke->ke_runq, ke);
                ke->ke_state = KES_THREAD;

                if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
                        CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
                            ke, ke->ke_runq, ke->ke_slice,
                            ke->ke_thread->td_priority);
                }
                return (ke);
        }

#ifdef SMP
        if (smp_started) {
                /*
                 * Find the cpu with the highest load and steal one proc.
                 */
                if ((kseq = kseq_load_highest()) == NULL)
                        return (NULL);

                /*
                 * Remove this kse from this kseq and runq and then requeue
                 * on the current processor.  Then we will dequeue it
                 * normally above.
                 */
                kseq_move(kseq, PCPU_GET(cpuid));
                goto retry;
        }
#endif

        return (NULL);
}

void
sched_add(struct kse *ke)
{
        struct kseq *kseq;
        struct ksegrp *kg;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
        KASSERT((ke->ke_thread->td_kse != NULL),
            ("sched_add: No KSE on thread"));
        KASSERT(ke->ke_state != KES_ONRUNQ,
            ("sched_add: kse %p (%s) already in run queue", ke,
            ke->ke_proc->p_comm));
        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("sched_add: process swapped out"));
        KASSERT(ke->ke_runq == NULL,
            ("sched_add: KSE %p is still assigned to a run queue", ke));

        kg = ke->ke_ksegrp;

        switch (PRI_BASE(kg->kg_pri_class)) {
        case PRI_ITHD:
        case PRI_REALTIME:
                kseq = KSEQ_SELF();
                ke->ke_runq = kseq->ksq_curr;
                ke->ke_slice = SCHED_SLICE_MAX;
                ke->ke_cpu = PCPU_GET(cpuid);
                break;
        case PRI_TIMESHARE:
                kseq = KSEQ_CPU(ke->ke_cpu);
                if (SCHED_CURR(kg, ke))
                        ke->ke_runq = kseq->ksq_curr;
                else
                        ke->ke_runq = kseq->ksq_next;
                break;
        case PRI_IDLE:
                kseq = KSEQ_CPU(ke->ke_cpu);
                /*
                 * This is for priority prop.
                 */
121115dc847eSJeff Roberson */ 1212210491d3SJeff Roberson if (ke->ke_thread->td_priority > PRI_MIN_IDLE) 121315dc847eSJeff Roberson ke->ke_runq = kseq->ksq_curr; 121415dc847eSJeff Roberson else 121515dc847eSJeff Roberson ke->ke_runq = &kseq->ksq_idle; 121615dc847eSJeff Roberson ke->ke_slice = SCHED_SLICE_MIN; 121715dc847eSJeff Roberson break; 121815dc847eSJeff Roberson default: 121915dc847eSJeff Roberson panic("Unknown pri class.\n"); 1220a8949de2SJeff Roberson break; 1221a6ed4186SJeff Roberson } 1222a8949de2SJeff Roberson 122335e6168fSJeff Roberson ke->ke_ksegrp->kg_runq_kses++; 122435e6168fSJeff Roberson ke->ke_state = KES_ONRUNQ; 122535e6168fSJeff Roberson 122615dc847eSJeff Roberson runq_add(ke->ke_runq, ke); 12279bca28a7SJeff Roberson kseq_add(kseq, ke); 122835e6168fSJeff Roberson } 122935e6168fSJeff Roberson 123035e6168fSJeff Roberson void 123135e6168fSJeff Roberson sched_rem(struct kse *ke) 123235e6168fSJeff Roberson { 123315dc847eSJeff Roberson struct kseq *kseq; 123415dc847eSJeff Roberson 123535e6168fSJeff Roberson mtx_assert(&sched_lock, MA_OWNED); 12369bca28a7SJeff Roberson KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); 123735e6168fSJeff Roberson 123835e6168fSJeff Roberson ke->ke_state = KES_THREAD; 123935e6168fSJeff Roberson ke->ke_ksegrp->kg_runq_kses--; 124015dc847eSJeff Roberson kseq = KSEQ_CPU(ke->ke_cpu); 124115dc847eSJeff Roberson runq_remove(ke->ke_runq, ke); 124215dc847eSJeff Roberson kseq_rem(kseq, ke); 124335e6168fSJeff Roberson } 124435e6168fSJeff Roberson 124535e6168fSJeff Roberson fixpt_t 124635e6168fSJeff Roberson sched_pctcpu(struct kse *ke) 124735e6168fSJeff Roberson { 124835e6168fSJeff Roberson fixpt_t pctcpu; 124935e6168fSJeff Roberson 125035e6168fSJeff Roberson pctcpu = 0; 125135e6168fSJeff Roberson 1252b90816f1SJeff Roberson mtx_lock_spin(&sched_lock); 125335e6168fSJeff Roberson if (ke->ke_ticks) { 125435e6168fSJeff Roberson int rtick; 125535e6168fSJeff Roberson 1256210491d3SJeff Roberson /* 1257210491d3SJeff Roberson * Don't update more frequently than twice a second. Allowing 1258210491d3SJeff Roberson * this causes the cpu usage to decay away too quickly due to 1259210491d3SJeff Roberson * rounding errors. 1260210491d3SJeff Roberson */ 1261210491d3SJeff Roberson if (ke->ke_ltick < (ticks - (hz / 2))) 126235e6168fSJeff Roberson sched_pctcpu_update(ke); 126335e6168fSJeff Roberson 126435e6168fSJeff Roberson /* How many rtick per second ? 
fixpt_t
sched_pctcpu(struct kse *ke)
{
        fixpt_t pctcpu;

        pctcpu = 0;

        mtx_lock_spin(&sched_lock);
        if (ke->ke_ticks) {
                int rtick;

                /*
                 * Don't update more frequently than twice a second.  Allowing
                 * this causes the cpu usage to decay away too quickly due to
                 * rounding errors.
                 */
                if (ke->ke_ltick < (ticks - (hz / 2)))
                        sched_pctcpu_update(ke);

                /* How many rtick per second? */
                rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
                pctcpu = (FSCALE * ((FSCALE * rtick) / realstathz)) >> FSHIFT;
        }

        ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
        mtx_unlock_spin(&sched_lock);

        return (pctcpu);
}

int
sched_sizeof_kse(void)
{
        return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
        return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
        return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
        return (sizeof(struct thread) + sizeof(struct td_sched));
}