/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define	KTR_ULE		KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 2;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;	/* Number of ticks we vol. slept */
	int	skg_runtime;	/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * This priority range has 20 priorities on either end that are reachable
 * only through nice values.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Reserved priorities for nice.
 * PRI_BASE:	The start of the dynamic range.
 * DYN_RANGE:	Number of priorities that are available in the dynamic
 *		priority range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		((SCHED_PRI_NRESV / 2) + PRI_MIN_TIMESHARE)
#define	SCHED_DYN_RANGE		(SCHED_PRI_RANGE - SCHED_PRI_NRESV)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_DYN_RANGE / SCHED_INTERACT_RANGE)
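
/*
 * Worked example of the mapping above (a sketch only; it assumes the
 * stock values PRI_MIN_TIMESHARE = 160, PRI_MAX_TIMESHARE = 223 and
 * PRIO_TOTAL = 40 from <sys/priority.h> and <sys/resource.h>):
 *
 *	SCHED_PRI_RANGE = 223 - 160 + 1 = 64
 *	SCHED_PRI_NRESV = 40, SCHED_PRI_NHALF = 20
 *	SCHED_PRI_BASE  = 20 + 160 = 180
 *	SCHED_DYN_RANGE = 64 - 40 = 24
 *
 * An interactivity score of 50 then lands SCHED_PRI_INTERACT(50) =
 * 50 * 24 / 100 = 12 priorities into the dynamic range, i.e. at
 * priority 180 + 12 + nice in sched_priority() below.
 */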

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:		Maximum amount of sleep time + run time we'll
 *			accumulate before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time.
 * INTERACT_RANGE:	Range of interactivity values.  Smaller is better.
 * INTERACT_HALF:	Convenience define, half of the interactivity range.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz / 10) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(10)
#define	SCHED_INTERACT_RANGE	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_RANGE / 2)
#define	SCHED_INTERACT_THRESH	(10)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of
 *		[0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 */
#define	SCHED_SLICE_MIN		(slice_min)
#define	SCHED_SLICE_MAX		(slice_max)
#define	SCHED_SLICE_RANGE	(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))
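
/*
 * Worked example (a sketch; it assumes hz = 1000, for which sched_setup()
 * below sets slice_min = hz / 100 = 10 and slice_max = hz / 10 = 100
 * ticks, giving SCHED_SLICE_RANGE = 91):
 *
 *	SCHED_SLICE_NICE(0)  = 100 - (0 * 91 / 19)  = 100 ticks
 *	SCHED_SLICE_NICE(5)  = 100 - (5 * 91 / 19)  = 77 ticks
 *	SCHED_SLICE_NICE(19) = 100 - (19 * 91 / 19) = 9 ticks
 *
 * Note that kg_slptime and kg_runtime are kept in a <<10 fixed point
 * format, so SCHED_SLP_RUN_MAX above is roughly a tenth of a second's
 * worth of hz ticks expressed in that format.
 */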

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)	SCHED_INTERACTIVE(kg)
#if 0
    (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))
#endif

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */
#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	unsigned int	ksq_rslices;		/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse *kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(struct kseq *kseq);
#ifdef SMP
struct kseq *kseq_load_highest(void);
#endif

void
kseq_print(struct kseq *kseq)
{
	int i;

	if (kseq == NULL)
		kseq = KSEQ_SELF();

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	kseq->ksq_loads[ke->ke_ksegrp->kg_pri_class]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
		CTR6(KTR_ULE,
		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
	}
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	kseq->ksq_loads[ke->ke_ksegrp->kg_pri_class]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 0)
		kseq->ksq_nicemin = nice;
}
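
/*
 * Example of the bin normalization above (assuming the stock PRIO_MIN of
 * -20, so SCHED_PRI_NHALF is 20): nice -20 is counted in ksq_nice[0],
 * nice 0 in ksq_nice[20] and nice +20 in ksq_nice[40].  kseq_nice_rem()
 * below walks the array upward from the removed bin using the same
 * offset to find the new least nice value.
 */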

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	if (load)
		return (KSEQ_CPU(cpu));

	return (NULL);
}
#endif

struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}
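
/*
 * The ksq_curr/ksq_next pair above amounts to a simple anti-starvation
 * scheme: expired or non-interactive timeshare kses are queued on
 * ksq_next (see sched_clock() and sched_add()), and only once ksq_curr
 * is completely drained are the two pointers swapped.  Every runnable
 * kse therefore gets a turn before any queue is refilled, at the cost
 * of one pointer swap rather than a requeue of every kse.
 */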

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
	int i;

	slice_min = (hz / 100);
	slice_max = (hz / 10);

	mtx_lock_spin(&sched_lock);
	/* init kseqs */
	for (i = 0; i < MAXCPU; i++)
		kseq_setup(KSEQ_CPU(i));

	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	CTR4(KTR_ULE, "Slp vs Run %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	if ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		kg->kg_runtime /= SCHED_SLP_RUN_THROTTLE;
		kg->kg_slptime /= SCHED_SLP_RUN_THROTTLE;
	}
	CTR4(KTR_ULE, "Slp vs Run(2) %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	return;
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int big;
	int small;
	int base;

	if (kg->kg_runtime > kg->kg_slptime) {
		big = kg->kg_runtime;
		small = kg->kg_slptime;
		base = SCHED_INTERACT_HALF;
	} else {
		big = kg->kg_slptime;
		small = kg->kg_runtime;
		base = 0;
	}

	big /= SCHED_INTERACT_HALF;
	if (big != 0)
		small /= big;
	else
		small = 0;

	small += base;
	/* XXX Factor in nice */
	return (small);
}
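
/*
 * Worked example of the score (hypothetical values): with 3 seconds of
 * sleep and 1 second of run time at hz = 1000, kg_slptime = 3000 << 10
 * and kg_runtime = 1000 << 10.  Sleep dominates, so base = 0,
 * big = kg_slptime / 50 = 61440 and small = kg_runtime / 61440 = 16.
 * A score of 16 is not below SCHED_INTERACT_THRESH (10), so this kseg
 * would not yet count as interactive; a sleep-to-run ratio better than
 * about 5:1 scores under the threshold.
 */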

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 *
	 * Shift the tick count out so that the divide doesn't round away
	 * our results.
	 */
	ke->ke_ticks <<= 10;
	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
	    SCHED_CPU_TICKS;
	ke->ke_ticks >>= 10;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}
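
/*
 * Worked example of the rescale (hypothetical numbers; hz = 1000, so
 * SCHED_CPU_TICKS = 10000): a kse that ran for 2500 of the last 12000
 * ticks has its count scaled to (2500 << 10) / 12000 * 10000 >> 10 =
 * about 2080 ticks, close to the ideal 2500/12000 * 10000 = 2083, i.e.
 * the same cpu fraction expressed over exactly a SCHED_CPU_TICKS-wide
 * window.  The <<10/>>10 keeps ten fractional bits alive across the
 * integer divide.
 */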

#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	if (!smp_started)
		return (0);

	load = -1;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		/* Remember the first queue seen and any lighter one after. */
		if (load == -1 || kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	td->td_priority = prio;

	if (TD_ON_RUNQ(td)) {
		rq = ke->ke_runq;

		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		runq_add(ke->ke_runq, ke);
		/* setrunqueue(td); */
		return;
	}
	if (ke->ke_runq)
		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
	/*
	 * We will not be on the run queue.  So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_THREADED)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_oncpu = PCPU_GET(cpuid);

	if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
	    td->td_priority != td->td_ksegrp->kg_user_pri)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_state != KES_ONRUNQ &&
			    ke->ke_state != KES_THREAD)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_priority(kg);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}
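
/*
 * Example of the crediting above (hypothetical numbers): at hz = 1000 a
 * thread that blocked for half a second wakes with hzticks = 500, so
 * 500 << 10 is added to kg_slptime in the same fixed point format that
 * sched_clock() uses for kg_runtime.  Enough such sleeps pull the
 * interactivity score under SCHED_INTERACT_THRESH and the kseg starts
 * being queued on ksq_curr.
 */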

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{
	child->ke_slice = ke->ke_slice;
	child->ke_cpu = ke->ke_cpu;	/* sched_pickcpu(); */
	child->ke_runq = NULL;

	/*
	 * Claim that we've been running for one second for statistical
	 * purposes.
	 */
	child->ke_ticks = 0;
	child->ke_ltick = ticks;
	child->ke_ftick = ticks - hz;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* XXX Need something better here */
	if (kg->kg_slptime > kg->kg_runtime) {
		child->kg_slptime = SCHED_DYN_RANGE;
		child->kg_runtime = kg->kg_slptime / SCHED_DYN_RANGE;
	} else {
		child->kg_runtime = SCHED_DYN_RANGE;
		child->kg_slptime = kg->kg_runtime / SCHED_DYN_RANGE;
	}

	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[kg->kg_pri_class]--;
		kseq->ksq_loads[class]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	struct ksegrp *kg;
	struct kse *ke;

	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	kg = FIRST_KSEGRP_IN_PROC(child);
	ke = FIRST_KSE_IN_KSEGRP(kg);
	kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
}

void
sched_clock(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct thread *td;
#if 0
	struct kse *nke;
#endif

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	td = ke->ke_thread;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_kse->ke_flags & KEF_IDLEKSE)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);

	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * Check for a higher priority task on the run queue.  This can
	 * happen on SMP if another processor woke up a process on our runq.
	 */
	kseq = KSEQ_SELF();
#if 0
	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
		if (sched_strict &&
		    nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
		else if (nke->ke_thread->td_priority <
		    td->td_priority + SCHED_PRIO_SLOP)

		if (nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
	}
#endif
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute
	 * our interactivity.
	 */
	kg->kg_runtime += tickincr << 10;

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}
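
/*
 * Worked example of the tick accounting above (hypothetical values):
 * with hz = 1000 and stathz = 128, tickincr = 1000 / 128 = 7, so each
 * stat clock tick charges 7 << 10 to kg_runtime.  sched_wakeup()
 * credits kg_slptime in the same units, which is what lets the two be
 * compared directly in sched_interact_score().
 */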

int
sched_runnable(void)
{
	struct kseq *kseq;

	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		return (1);
#ifdef SMP
	/*
	 * For SMP we may steal other processors' KSEs.  Just search until
	 * we verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i))
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load)
				return (1);
		}
	}
#endif
	return (0);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;
#ifdef SMP
	int steal;

	steal = 0;
#endif

	kseq = KSEQ_SELF();
#ifdef SMP
retry:
#endif
	ke = kseq_choose(kseq);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
#ifdef SMP
		/*
		 * If we've stolen this thread we need to kill the pointer
		 * to the run queue and reset the cpu id.
		 */
		if (steal) {
			kseq_rem(kseq, ke);
			ke->ke_cpu = PCPU_GET(cpuid);
			kseq_add(KSEQ_SELF(), ke);
		}
#endif
		return (ke);
	}

#ifdef SMP
	if (ke == NULL && smp_started) {
		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		steal = 1;
		if ((kseq = kseq_load_highest()) != NULL)
			goto retry;
	}
#endif

	return (NULL);
}

void
sched_add(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));

	kg = ke->ke_ksegrp;

	if (ke->ke_runq)
		Debugger("hrm?");

	switch (kg->kg_pri_class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		if (ke->ke_runq == NULL)
			kseq_add(kseq, ke);
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (ke->ke_runq == NULL) {
			if (SCHED_CURR(kg, ke))
				ke->ke_runq = kseq->ksq_curr;
			else
				ke->ke_runq = kseq->ksq_next;
			kseq_add(kseq, ke);
		}
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);

		if (ke->ke_runq == NULL)
			kseq_add(kseq, ke);
		/*
		 * This is for priority propagation.
		 */
		if (ke->ke_thread->td_priority < PRI_MAX_TIMESHARE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
}

void
sched_rem(struct kse *ke)
{
	struct kseq *kseq;

	mtx_assert(&sched_lock, MA_OWNED);
	/* KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); */
	panic("WTF\n");

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	if (ke->ke_ticks) {
		int rtick;

		/* Update to account for time potentially spent sleeping */
		ke->ke_ltick = ticks;
		sched_pctcpu_update(ke);

		/* How many rtick per second ? */
		rtick = ke->ke_ticks / SCHED_CPU_TIME;
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}