/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 2;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callout to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
#endif

/*
 * These data structures are allocated within their parent data structure but
 * are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;	/* Number of ticks we vol. slept */
	int	skg_runtime;	/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * This priority range has 20 priorities on either end that are reachable
 * only through nice values.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Reserved priorities for nice.
 * PRI_BASE:	The start of the dynamic range.
 * DYN_RANGE:	Number of priorities that are available in the dynamic
 *		priority range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		((SCHED_PRI_NRESV / 2) + PRI_MIN_TIMESHARE)
#define	SCHED_DYN_RANGE		(SCHED_PRI_RANGE - SCHED_PRI_NRESV)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_DYN_RANGE / SCHED_INTERACT_RANGE)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time.
 * INTERACT_RANGE:	Range of interactivity values.  Smaller is better.
 * INTERACT_HALF:	Convenience define, half of the interactivity range.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
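 *
 * With INTERACT_RANGE of 100 and INTERACT_THRESH of 10, for example, a
 * ksegrp must sleep roughly five times as long as it runs before
 * sched_interact_score() drops below the threshold and it is treated as
 * interactive.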
 */
#define	SCHED_SLP_RUN_MAX	((hz / 10) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(10)
#define	SCHED_INTERACT_RANGE	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_RANGE / 2)
#define	SCHED_INTERACT_THRESH	(10)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	unsigned int	ksq_rslices;		/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
struct kseq * kseq_load_highest(void);
void kseq_balance(void *arg);
void kseq_move(struct kseq *from, int cpu);
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

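/*
 * For example, with timeshare KSEs queued at nice 0 and nice 5, ksq_nicemin
 * is 0; when the last nice 0 KSE is removed, the loop above rescans the
 * bins and ksq_nicemin becomes 5.
 */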

#ifdef SMP
/*
 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
void
kseq_balance(void *arg)
{
	struct kseq *kseq;
	int high_load;
	int low_load;
	int high_cpu;
	int low_cpu;
	int move;
	int diff;
	int i;

	high_cpu = 0;
	low_cpu = 0;
	high_load = 0;
	low_load = -1;

	mtx_lock_spin(&sched_lock);
	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > high_load) {
			high_load = kseq->ksq_load;
			high_cpu = i;
		}
		if (low_load == -1 || kseq->ksq_load < low_load) {
			low_load = kseq->ksq_load;
			low_cpu = i;
		}
	}

	/*
	 * Nothing to do.
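	 * A high load below two means even the busiest cpu has no KSE to
	 * spare; otherwise the difference is split, rounded up, to decide
	 * how many KSEs to migrate.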
	 */
	if (high_load < 2 || low_load == high_load)
		goto out;

	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;

	for (i = 0; i < move; i++)
		kseq_move(KSEQ_CPU(high_cpu), low_cpu);

out:
	mtx_unlock_spin(&sched_lock);
	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);

	return;
}

struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	if (load > 1)
		return (KSEQ_CPU(cpu));

	return (NULL);
}

void
kseq_move(struct kseq *from, int cpu)
{
	struct kse *ke;

	ke = kseq_choose(from);
	runq_remove(ke->ke_runq, ke);
	ke->ke_state = KES_THREAD;
	kseq_rem(from, ke);

	ke->ke_cpu = cpu;
	sched_add(ke);
}
#endif

struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
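			 * Both halves of the timeshare queue are empty, so
			 * fall back to the idle queue below.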
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
	kseq->ksq_load = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
	int i;

	slice_min = (hz/100);
	slice_max = (hz/10);

	mtx_lock_spin(&sched_lock);
	/* init kseqs */
	for (i = 0; i < MAXCPU; i++)
		kseq_setup(KSEQ_CPU(i));

	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
#ifdef SMP
	callout_init(&kseq_lb_callout, 1);
	kseq_balance(NULL);
#endif
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
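 *
 * The interactivity score selects an offset into SCHED_DYN_RANGE above
 * SCHED_PRI_BASE, nice is added on top, and the result is clamped to the
 * timeshare range, so a lower score yields a better (numerically smaller)
 * user priority.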
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if one abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
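	 *
	 * For example, assuming hz of 1000 (so sched_setup() sets slice_min
	 * to 10 and slice_max to 100 ticks), a non-interactive kseg 5 nice
	 * points above the least nice kseg on this cpu receives
	 * SCHED_SLICE_NICE(5), roughly 77 ticks, while one 20 or more
	 * points above it receives no slice at all.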
	 *
	 */

	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	CTR4(KTR_ULE, "Slp vs Run %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	if ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		kg->kg_runtime /= SCHED_SLP_RUN_THROTTLE;
		kg->kg_slptime /= SCHED_SLP_RUN_THROTTLE;
	}
	CTR4(KTR_ULE, "Slp vs Run(2) %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	return;
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int big;
	int small;
	int base;

	if (kg->kg_runtime > kg->kg_slptime) {
		big = kg->kg_runtime;
		small = kg->kg_slptime;
		base = SCHED_INTERACT_HALF;
	} else {
		big = kg->kg_slptime;
		small = kg->kg_runtime;
		base = 0;
	}

	big /= SCHED_INTERACT_HALF;
	if (big != 0)
		small /= big;
	else
		small = 0;

	small += base;
	/* XXX Factor in nice */
	return (small);
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 *
	 * Shift the tick count out so that the divide doesn't round away
	 * our results.
	 */
	ke->ke_ticks <<= 10;
	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
	    SCHED_CPU_TICKS;
	ke->ke_ticks >>= 10;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (!smp_started)
		return (0);

	load = 0;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	td->td_priority = prio;

	if (TD_ON_RUNQ(td)) {
		rq = ke->ke_runq;

		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		runq_add(ke->ke_runq, ke);
		/* setrunqueue(td); */
		return;
	}
	if (ke->ke_runq)
		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
	/*
	 * We will not be on the run queue.  So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_THREADED)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_oncpu = PCPU_GET(cpuid);

	if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
	    td->td_priority != td->td_ksegrp->kg_user_pri)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
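	 * Only KSEs that are on a run queue or currently running are
	 * accounted in a kseq's nice bins, so skip the others here.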
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_state != KES_ONRUNQ &&
			    ke->ke_state != KES_THREAD)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_priority(kg);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
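 *
 * The child also gets credited with one second of run history purely for
 * pctcpu bookkeeping; see sched_fork_kse() below.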
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = ke->ke_slice;
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/*
	 * Claim that we've been running for one second for statistical
	 * purposes.
	 */
	child->ke_ticks = 0;
	child->ke_ltick = ticks;
	child->ke_ftick = ticks - hz;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
	/* XXX Need something better here */
	if (kg->kg_slptime > kg->kg_runtime) {
		child->kg_slptime = SCHED_DYN_RANGE;
		child->kg_runtime = kg->kg_slptime / SCHED_DYN_RANGE;
	} else {
		child->kg_runtime = SCHED_DYN_RANGE;
		child->kg_slptime = kg->kg_runtime / SCHED_DYN_RANGE;
	}

	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct thread *td;
#if 0
	struct kse *nke;
#endif

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
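		 * For example, stathz of 128 with hz of 100 makes the
		 * integer division above yield 0, which is then clamped
		 * to 1 below.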
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	td = ke->ke_thread;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);

	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * Check for a higher priority task on the run queue.  This can happen
	 * on SMP if another processor woke up a process on our runq.
	 */
	kseq = KSEQ_SELF();
#if 0
	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
		if (sched_strict &&
		    nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
		else if (nke->ke_thread->td_priority <
		    td->td_priority SCHED_PRIO_SLOP)

		if (nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
	}
#endif
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
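	 * The kse goes back on ksq_curr only if SCHED_CURR() still holds
	 * (a borrowed kernel priority or an interactive ksegrp); otherwise
	 * it waits on ksq_next until kseq_choose() swaps the queues.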
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	mtx_lock_spin(&sched_lock);
	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		goto out;
#ifdef SMP
	/*
	 * For SMP we may steal other processors' KSEs.  Just search until we
	 * verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i))
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load > 1)
				goto out;
		}
	}
#endif
	load = 0;
out:
	mtx_unlock_spin(&sched_lock);
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
retry:
#endif
	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}

#ifdef SMP
	if (smp_started) {
		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		if ((kseq = kseq_load_highest()) == NULL)
			return (NULL);

		/*
		 * Remove this kse from this kseq and runq and then requeue
		 * on the current processor.  Then we will dequeue it
		 * normally above.
		 */
		kseq_move(kseq, PCPU_GET(cpuid));
		goto retry;
	}
#endif

	return (NULL);
}

void
sched_add(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	kg = ke->ke_ksegrp;

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		/*
		 * This is for priority prop.
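		 * An idle class thread whose priority has been propagated
		 * up into or above the timeshare range (e.g. while holding
		 * a lock a higher priority thread needs) goes on the current
		 * queue so it runs promptly.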
		 */
		if (ke->ke_thread->td_priority < PRI_MAX_TIMESHARE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct kse *ke)
{
	struct kseq *kseq;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/* Update to account for time potentially spent sleeping */
		ke->ke_ltick = ticks;
		sched_pctcpu_update(ke);

		/* How many rtick per second ? */
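		/*
		 * ke_ticks accumulates roughly SCHED_CPU_TIME seconds worth
		 * of stathz ticks, so dividing by SCHED_CPU_TIME gives ticks
		 * per second, which is then scaled against realstathz into a
		 * fixed point fraction.
		 */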
		rtick = ke->ke_ticks / SCHED_CPU_TIME;
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}