/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 2;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int	ske_slice;
	struct	runq *ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int	ske_ltick;	/* Last tick that we were running on */
	int	ske_ftick;	/* First tick that we were running on */
	int	ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char	ske_cpu;
};
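/*
 * For example (assuming the common hz = 1000): if a kse first ran at
 * tick 20000 (ske_ftick) and last ran at tick 30000 (ske_ltick), then
 * ske_ticks holds the number of stat clock ticks it actually spent
 * running within that 10000 tick window; sched_pctcpu() below turns
 * that ratio into the p_pctcpu value reported to ps.
 */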
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;	/* Number of ticks we vol. slept */
	int	skg_runtime;	/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * This priority range has 20 priorities on either end that are reachable
 * only through nice values.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Reserved priorities for nice.
 * PRI_BASE:	The start of the dynamic range.
 * DYN_RANGE:	Number of priorities that are available in the dynamic
 *		priority range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		((SCHED_PRI_NRESV / 2) + PRI_MIN_TIMESHARE)
#define	SCHED_DYN_RANGE		(SCHED_PRI_RANGE - SCHED_PRI_NRESV)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_DYN_RANGE / SCHED_INTERACT_RANGE)
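/*
 * For illustration, assuming the stock priority layout
 * (PRI_MIN_TIMESHARE = 160, PRI_MAX_TIMESHARE = 223, PRIO_TOTAL = 40)
 * these work out to:
 *
 *	SCHED_PRI_RANGE  = 64	SCHED_PRI_NRESV   = 40
 *	SCHED_PRI_NHALF  = 20	SCHED_PRI_NTHRESH = 19
 *	SCHED_PRI_BASE   = 180	SCHED_DYN_RANGE   = 24
 *
 * so SCHED_PRI_INTERACT() maps an interactivity score of 0..99 onto
 * the 24 dynamic priorities above SCHED_PRI_BASE, leaving roughly 20
 * priorities on each end reachable only through nice, as described
 * above.
 */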
/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time.
 * INTERACT_RANGE:	Range of interactivity values.  Smaller is better.
 * INTERACT_HALF:	Convenience define, half of the interactivity range.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz / 10) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(10)
#define	SCHED_INTERACT_RANGE	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_RANGE / 2)
#define	SCHED_INTERACT_THRESH	(10)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 */
#define	SCHED_SLICE_MIN		(slice_min)
#define	SCHED_SLICE_MAX		(slice_max)
#define	SCHED_SLICE_RANGE	(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))
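/*
 * A worked slice example, assuming hz = 1000 (so sched_setup() sets
 * slice_min = 10 and slice_max = 100, giving SCHED_SLICE_RANGE = 91)
 * and the stock priority layout above: a kse whose nice matches the
 * least nice on its queue (scaled nice 0) gets SCHED_SLICE_NICE(0) =
 * 100 ticks, one 5 points nicer gets 100 - (5 * 91) / 19 = 77 ticks,
 * and anything more than SCHED_PRI_NTHRESH = 19 points nicer receives
 * no slice at all.
 */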
/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct	runq ksq_idle;		/* Queue of IDLE threads. */
	struct	runq ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct	runq *ksq_next;		/* Next timeshare queue. */
	struct	runq *ksq_curr;		/* Current queue. */
	int	ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int	ksq_load;		/* Aggregate load. */
	short	ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short	ksq_nicemin;		/* Least nice. */
#ifdef SMP
	unsigned int	ksq_rslices;	/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
struct kseq * kseq_load_highest(void);
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}
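/*
 * As an example, a kseq carrying one interrupt thread plus three
 * timeshare KSEs (two at nice 0, one at nice 5) would print something
 * like the following (hypothetical output, shown only to document the
 * format above):
 *
 *	kseq:
 *		load: 4
 *		load ITHD: 1
 *		load REALTIME: 0
 *		load TIMESHARE: 3
 *		load IDLE: 0
 *		nicemin:	0
 *		nice counts:
 *			0 = 2
 *			5 = 1
 */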
static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
		kseq->ksq_nicemin = nice;
}
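/*
 * Example of the normalization above, assuming the usual PRIO_MIN of
 * -20: a kse at nice -20 lands in bin ksq_nice[0] and one at nice 0
 * in bin ksq_nice[20], so the array index is always non-negative,
 * while ksq_nicemin keeps the un-normalized value (-20 here) for use
 * by sched_slice().
 */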
static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	if (load > 1)
		return (KSEQ_CPU(cpu));

	return (NULL);
}
#endif

struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}
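/*
 * To sketch the rotation above: non-interactive timeshare KSEs that
 * expire are requeued on ksq_next while ksq_curr drains, so a
 * CPU-bound kse that used up its slice cannot run again until
 * everything left on ksq_curr has had a turn; once ksq_curr is empty
 * the two queues swap roles.  A zero-slice kse is pushed back onto
 * ksq_next each pass until the nice window moves enough for
 * sched_slice() to grant it a slice again.
 */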
static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
	kseq->ksq_load = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
	int i;

	slice_min = (hz/100);
	slice_max = (hz/10);

	mtx_lock_spin(&sched_lock);
	/* init kseqs */
	for (i = 0; i < MAXCPU; i++)
		kseq_setup(KSEQ_CPU(i));

	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}
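/*
 * Tracing one call, with the stock values assumed earlier: a ksegrp
 * scoring 25 at nice 0 gets
 *	pri = SCHED_PRI_INTERACT(25) + SCHED_PRI_BASE + 0
 *	    = (25 * 24 / 100) + 180 = 186,
 * while a fully CPU-bound one (score 99) at nice +20 lands exactly on
 * the PRI_MAX_TIMESHARE clamp: 23 + 180 + 20 = 223.
 */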
/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	CTR4(KTR_ULE, "Slp vs Run %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	if ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		kg->kg_runtime /= SCHED_SLP_RUN_THROTTLE;
		kg->kg_slptime /= SCHED_SLP_RUN_THROTTLE;
	}
	CTR4(KTR_ULE, "Slp vs Run(2) %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	return;
}
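/*
 * The throttle preserves the sleep/run ratio while shrinking its
 * magnitude.  E.g. with hz = 1000, SCHED_SLP_RUN_MAX is 100 << 10; a
 * kg at runtime = 90 << 10 and slptime = 30 << 10 exceeds that sum
 * and is cut to 9 << 10 and 3 << 10, so its interactivity score is
 * unchanged but new behavior will dominate the history quickly.
 */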
static int
sched_interact_score(struct ksegrp *kg)
{
	int big;
	int small;
	int base;

	if (kg->kg_runtime > kg->kg_slptime) {
		big = kg->kg_runtime;
		small = kg->kg_slptime;
		base = SCHED_INTERACT_HALF;
	} else {
		big = kg->kg_slptime;
		small = kg->kg_runtime;
		base = 0;
	}

	big /= SCHED_INTERACT_HALF;
	if (big != 0)
		small /= big;
	else
		small = 0;

	small += base;
	/* XXX Factor in nice */
	return (small);
}
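/*
 * Score trace: a kg that sleeps twice as much as it runs, say
 * slptime = 200 << 10 and runtime = 100 << 10, takes the else branch:
 * big = 204800 / 50 = 4096, small = 102400 / 4096 = 25, base = 0,
 * for a score of 25 (0..49 means sleep-dominated).  Swapping the two
 * numbers yields 50 + 25 = 75 (50..99 means run-dominated).  Only
 * scores below SCHED_INTERACT_THRESH (10) count as interactive.
 */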
/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 *
	 * Shift the tick count out so that the divide doesn't round away
	 * our results.
	 */
	ke->ke_ticks <<= 10;
	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
	    SCHED_CPU_TICKS;
	ke->ke_ticks >>= 10;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}
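/*
 * The update rescales ke_ticks to a full SCHED_CPU_TICKS window.
 * E.g. with hz = 1000: a kse that accumulated 600 ticks over a 12000
 * hz-tick span becomes (600 << 10) / 12000 * 10000 >> 10, roughly 500
 * ticks (498 after integer truncation) against the standard 10000
 * tick window, and the watermarks are snapped to [ticks - 10000,
 * ticks].
 */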
#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (!smp_started)
		return (0);

	load = 0;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	td->td_priority = prio;

	if (TD_ON_RUNQ(td)) {
		rq = ke->ke_runq;

		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		runq_add(ke->ke_runq, ke);
		/* setrunqueue(td); */
		return;
	}
	if (ke->ke_runq)
		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
	/*
	 * We will not be on the run queue.  So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_THREADED)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_oncpu = PCPU_GET(cpuid);

	if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
	    td->td_priority != td->td_ksegrp->kg_user_pri)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_state != KES_ONRUNQ &&
			    ke->ke_state != KES_THREAD)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}
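/*
 * Sleep time accounting pairs with wakeup below: td_slptime records
 * the tick at which the thread slept, and sched_wakeup() credits the
 * difference, scaled up by << 10 for fixed-point precision, to
 * kg_slptime.  E.g. a thread that sleeps for 50 ticks adds 50 << 10
 * to kg_slptime, nudging its interactivity score down (better).
 */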
void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_priority(kg);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = ke->ke_slice;
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/*
	 * Claim that we've been running for one second for statistical
	 * purposes.
	 */
	child->ke_ticks = 0;
	child->ke_ltick = ticks;
	child->ke_ftick = ticks - hz;
}
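/*
 * With ke_ticks = 0 over a manufactured one second window (ke_ftick =
 * ticks - hz), the child initially reports ~0% cpu but has a sane,
 * non-empty window, so the ke_ltick - ke_ftick divide in
 * sched_pctcpu_update() never hits zero for a freshly forked kse.
 */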
void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
	/* XXX Need something better here */
	if (kg->kg_slptime > kg->kg_runtime) {
		child->kg_slptime = SCHED_DYN_RANGE;
		child->kg_runtime = kg->kg_slptime / SCHED_DYN_RANGE;
	} else {
		child->kg_runtime = SCHED_DYN_RANGE;
		child->kg_slptime = kg->kg_runtime / SCHED_DYN_RANGE;
	}

	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
}
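/*
 * Numerically, with SCHED_DYN_RANGE = 24 (stock values assumed
 * earlier): a sleep-dominated parent with kg_slptime = 48 << 10 seeds
 * the child with kg_slptime = 24 and kg_runtime = (48 << 10) / 24 =
 * 2048, i.e. a small synthetic history that the child's own behavior
 * will quickly dominate rather than a wholesale copy of the parent's.
 */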
void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}
919a8949de2SJeff Roberson */ 920a8949de2SJeff Roberson if (kg->kg_pri_class != PRI_TIMESHARE) 921a8949de2SJeff Roberson return; 922a8949de2SJeff Roberson /* 92315dc847eSJeff Roberson * Check for a higher priority task on the run queue. This can happen 92415dc847eSJeff Roberson * on SMP if another processor woke up a process on our runq. 92535e6168fSJeff Roberson */ 92615dc847eSJeff Roberson kseq = KSEQ_SELF(); 92715dc847eSJeff Roberson #if 0 92815dc847eSJeff Roberson if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) { 92915dc847eSJeff Roberson if (sched_strict && 93015dc847eSJeff Roberson nke->ke_thread->td_priority < td->td_priority) 93115dc847eSJeff Roberson td->td_flags |= TDF_NEEDRESCHED; 93215dc847eSJeff Roberson else if (nke->ke_thread->td_priority < 93315dc847eSJeff Roberson td->td_priority SCHED_PRIO_SLOP) 93415dc847eSJeff Roberson 93515dc847eSJeff Roberson if (nke->ke_thread->td_priority < td->td_priority) 93615dc847eSJeff Roberson td->td_flags |= TDF_NEEDRESCHED; 93715dc847eSJeff Roberson } 93815dc847eSJeff Roberson #endif 93915dc847eSJeff Roberson /* 94015dc847eSJeff Roberson * We used a tick charge it to the ksegrp so that we can compute our 94115dc847eSJeff Roberson * interactivity. 94215dc847eSJeff Roberson */ 94315dc847eSJeff Roberson kg->kg_runtime += tickincr << 10; 944407b0157SJeff Roberson 94535e6168fSJeff Roberson /* 94635e6168fSJeff Roberson * We used up one time slice. 94735e6168fSJeff Roberson */ 94835e6168fSJeff Roberson ke->ke_slice--; 94915dc847eSJeff Roberson #ifdef SMP 950c36ccfa2SJeff Roberson kseq->ksq_rslices--; 95115dc847eSJeff Roberson #endif 95215dc847eSJeff Roberson 95315dc847eSJeff Roberson if (ke->ke_slice > 0) 95415dc847eSJeff Roberson return; 95535e6168fSJeff Roberson /* 95615dc847eSJeff Roberson * We're out of time, recompute priorities and requeue. 95735e6168fSJeff Roberson */ 95815dc847eSJeff Roberson kseq_rem(kseq, ke); 959e1f89c22SJeff Roberson sched_priority(kg); 96015dc847eSJeff Roberson sched_slice(ke); 96115dc847eSJeff Roberson if (SCHED_CURR(kg, ke)) 96215dc847eSJeff Roberson ke->ke_runq = kseq->ksq_curr; 96315dc847eSJeff Roberson else 96415dc847eSJeff Roberson ke->ke_runq = kseq->ksq_next; 96515dc847eSJeff Roberson kseq_add(kseq, ke); 9664a338afdSJulian Elischer td->td_flags |= TDF_NEEDRESCHED; 96735e6168fSJeff Roberson } 96835e6168fSJeff Roberson 96935e6168fSJeff Roberson int 97035e6168fSJeff Roberson sched_runnable(void) 97135e6168fSJeff Roberson { 97235e6168fSJeff Roberson struct kseq *kseq; 973b90816f1SJeff Roberson int load; 97435e6168fSJeff Roberson 975b90816f1SJeff Roberson load = 1; 976b90816f1SJeff Roberson 977b90816f1SJeff Roberson mtx_lock_spin(&sched_lock); 9780a016a05SJeff Roberson kseq = KSEQ_SELF(); 97935e6168fSJeff Roberson 98015dc847eSJeff Roberson if (kseq->ksq_load) 981b90816f1SJeff Roberson goto out; 982c9f25d8fSJeff Roberson #ifdef SMP 9830a016a05SJeff Roberson /* 9840a016a05SJeff Roberson * For SMP we may steal other processor's KSEs. Just search until we 9850a016a05SJeff Roberson * verify that at least on other cpu has a runnable task. 
int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	mtx_lock_spin(&sched_lock);
	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		goto out;
#ifdef SMP
	/*
	 * For SMP we may steal other processors' KSEs.  Just search until we
	 * verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i))
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load > 1)
				goto out;
		}
	}
#endif
	load = 0;
out:
	mtx_unlock_spin(&sched_lock);
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}
struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
retry:
#endif
	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}

#ifdef SMP
	if (smp_started) {
		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		if ((kseq = kseq_load_highest()) == NULL)
			return (NULL);

		/*
		 * Remove this kse from this kseq and runq and then requeue
		 * on the current processor.  Then we will dequeue it
		 * normally above.
		 */
		ke = kseq_choose(kseq);
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;
		kseq_rem(kseq, ke);

		ke->ke_cpu = PCPU_GET(cpuid);
		sched_add(ke);
		goto retry;
	}
#endif

	return (NULL);
}
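/*
 * The steal path relies on kseq_load_highest() only returning a kseq
 * whose load is greater than one, so the victim cpu is always left
 * with at least one kse; and since sched_add() re-homes the stolen
 * kse on this cpu first, the retry is guaranteed to find it on our
 * own queues.
 */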
void
sched_add(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	kg = ke->ke_ksegrp;

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		/*
		 * This is for priority propagation.
		 */
		if (ke->ke_thread->td_priority < PRI_MAX_TIMESHARE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct kse *ke)
{
	struct kseq *kseq;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}
fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/* Update to account for time potentially spent sleeping */
		ke->ke_ltick = ticks;
		sched_pctcpu_update(ke);

		/* How many rticks per second? */
		rtick = ke->ke_ticks / SCHED_CPU_TIME;
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}