xref: /freebsd/sys/kern/sched_ule.c (revision 0ec896fd28be9eba2112c1389059dbaec92de282)
1 /*-
2  * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/ktr.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/proc.h>
37 #include <sys/resource.h>
38 #include <sys/sched.h>
39 #include <sys/smp.h>
40 #include <sys/sx.h>
41 #include <sys/sysctl.h>
42 #include <sys/sysproto.h>
43 #include <sys/vmmeter.h>
44 #ifdef DDB
45 #include <ddb/ddb.h>
46 #endif
47 #ifdef KTRACE
48 #include <sys/uio.h>
49 #include <sys/ktrace.h>
50 #endif
51 
52 #include <machine/cpu.h>
53 
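/*
 * ULE has no KTR event class of its own; its tracepoints borrow the
 * KTR_NFS class.
 */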
54 #define KTR_ULE         KTR_NFS
55 
56 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
57 /* XXX This is bogus compatibility crap for ps */
58 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
59 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
60 
61 static void sched_setup(void *dummy);
62 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
63 
64 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");
65 
66 static int sched_strict;
67 SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");
68 
69 static int slice_min = 1;
70 SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
71 
72 static int slice_max = 10;
73 SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
74 
75 int realstathz;
76 int tickincr = 1;
77 
78 #ifdef SMP
79 /* Callout to handle load balancing SMP systems. */
80 static struct callout kseq_lb_callout;
81 #endif
82 
83 /*
84  * These data structures are allocated within their parent data structure but
85  * are scheduler specific.
86  */
87 
88 struct ke_sched {
89 	int		ske_slice;
90 	struct runq	*ske_runq;
91 	/* The following variables are only used for pctcpu calculation */
92 	int		ske_ltick;	/* Last tick that we were running on */
93 	int		ske_ftick;	/* First tick that we were running on */
94 	int		ske_ticks;	/* Tick count */
95 	/* CPU that we have affinity for. */
96 	u_char		ske_cpu;
97 };
98 #define	ke_slice	ke_sched->ske_slice
99 #define	ke_runq		ke_sched->ske_runq
100 #define	ke_ltick	ke_sched->ske_ltick
101 #define	ke_ftick	ke_sched->ske_ftick
102 #define	ke_ticks	ke_sched->ske_ticks
103 #define	ke_cpu		ke_sched->ske_cpu
104 
105 struct kg_sched {
106 	int	skg_slptime;		/* Number of ticks we voluntarily slept */
107 	int	skg_runtime;		/* Number of ticks we were running */
108 };
109 #define	kg_slptime	kg_sched->skg_slptime
110 #define	kg_runtime	kg_sched->skg_runtime
111 
112 struct td_sched {
113 	int	std_slptime;
114 };
115 #define	td_slptime	td_sched->std_slptime
116 
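/*
 * Statically allocated scheduler state for the initial kse, ksegrp and
 * thread.  The kse0/ksegrp0/thread0 pointers below hand this storage to
 * the boot-time process; proc0_sched is NULL because ULE keeps no
 * per-process state (see sched_sizeof_proc()).
 */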
117 struct td_sched td_sched;
118 struct ke_sched ke_sched;
119 struct kg_sched kg_sched;
120 
121 struct ke_sched *kse0_sched = &ke_sched;
122 struct kg_sched *ksegrp0_sched = &kg_sched;
123 struct p_sched *proc0_sched = NULL;
124 struct td_sched *thread0_sched = &td_sched;
125 
126 /*
127  * The priority is primarily determined by the interactivity score.  Thus, we
128  * give lower (better) priorities to kse groups that use less CPU.  The nice
129  * value is then directly added to this to allow nice to have some effect
130  * on latency.
131  *
132  * PRI_RANGE:	Total priority range for timeshare threads.
133  * PRI_NRESV:	Number of nice values.
134  * PRI_BASE:	The start of the dynamic range.
135  */
136 #define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
137 #define	SCHED_PRI_NRESV		PRIO_TOTAL
138 #define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
139 #define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
140 #define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
141 #define	SCHED_PRI_INTERACT(score)					\
142     ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
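/*
 * e.g. a kseg scoring SCHED_INTERACT_MAX / 2 maps to the middle of the
 * timeshare range before SCHED_PRI_BASE and the nice value are added in
 * sched_priority().
 */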
143 
144 /*
145  * These determine the interactivity of a process.
146  *
147  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
148  *		before throttling back.
149  * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time at fork time.
150  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
151  * INTERACT_THRESH:	Threshold for placement on the current runq.
152  */
153 #define	SCHED_SLP_RUN_MAX	((hz * 2) << 10)
154 #define	SCHED_SLP_RUN_THROTTLE	(100)
155 #define	SCHED_INTERACT_MAX	(100)
156 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
157 #define	SCHED_INTERACT_THRESH	(20)
158 
159 /*
160  * These parameters and macros determine the size of the time slice that is
161  * granted to each thread.
162  *
163  * SLICE_MIN:	Minimum time slice granted, in units of ticks.
164  * SLICE_MAX:	Maximum time slice granted.
165  * SLICE_RANGE:	Range of available time slices scaled by hz.
166  * SLICE_SCALE:	The number of slices granted per val in the range [0, max].
167  * SLICE_NICE:  Determines the size of the slice granted for a scaled nice value.
168  */
169 #define	SCHED_SLICE_MIN			(slice_min)
170 #define	SCHED_SLICE_MAX			(slice_max)
171 #define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
172 #define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
173 #define	SCHED_SLICE_NICE(nice)						\
174     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))
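/*
 * e.g. assuming hz = 1000, sched_setup() gives slice_min = 10 and
 * slice_max = 142 ticks.  A kse at the least nice value on its queue
 * (offset 0) then gets the full 142 ticks, while one that is
 * SCHED_PRI_NTHRESH points nicer gets slice_min - 1.
 */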
175 
176 /*
177  * This macro determines whether or not the kse belongs on the current or
178  * next run queue.
179  *
180  * XXX nice value should affect how interactive a kg is.
181  */
182 #define	SCHED_INTERACTIVE(kg)						\
183     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
184 #define	SCHED_CURR(kg, ke)						\
185     (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))
186 
187 /*
188  * Cpu percentage computation macros and defines.
189  *
190  * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
191  * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
192  */
193 
194 #define	SCHED_CPU_TIME	10
195 #define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
196 
197 /*
198  * kseq - per processor runqs and statistics.
199  */
200 
201 #define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */
202 
203 struct kseq {
204 	struct runq	ksq_idle;		/* Queue of IDLE threads. */
205 	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
206 	struct runq	*ksq_next;		/* Next timeshare queue. */
207 	struct runq	*ksq_curr;		/* Current queue. */
208 	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
209 	int		ksq_load;		/* Aggregate load. */
210 	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
211 	short		ksq_nicemin;		/* Least nice. */
212 #ifdef SMP
213 	int		ksq_cpus;	/* Count of CPUs in this kseq. */
214 	unsigned int	ksq_rslices;	/* Slices on run queue */
215 #endif
216 };
217 
218 /*
219  * One kse queue per processor.
220  */
221 #ifdef SMP
222 struct kseq	kseq_cpu[MAXCPU];
223 struct kseq	*kseq_idmap[MAXCPU];
224 #define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
225 #define	KSEQ_CPU(x)	(kseq_idmap[(x)])
226 #else
227 struct kseq	kseq_cpu;
228 #define	KSEQ_SELF()	(&kseq_cpu)
229 #define	KSEQ_CPU(x)	(&kseq_cpu)
230 #endif
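/*
 * The indirection through kseq_idmap lets several logical CPUs (e.g. HTT
 * siblings described by smp_topology) share a single kseq; see
 * sched_setup().
 */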
231 
232 static void sched_slice(struct kse *ke);
233 static void sched_priority(struct ksegrp *kg);
234 static int sched_interact_score(struct ksegrp *kg);
235 static void sched_interact_update(struct ksegrp *kg);
236 void sched_pctcpu_update(struct kse *ke);
237 int sched_pickcpu(void);
238 
239 /* Operations on per processor queues */
240 static struct kse * kseq_choose(struct kseq *kseq);
241 static void kseq_setup(struct kseq *kseq);
242 static void kseq_add(struct kseq *kseq, struct kse *ke);
243 static void kseq_rem(struct kseq *kseq, struct kse *ke);
244 static void kseq_nice_add(struct kseq *kseq, int nice);
245 static void kseq_nice_rem(struct kseq *kseq, int nice);
246 void kseq_print(int cpu);
247 #ifdef SMP
248 struct kseq * kseq_load_highest(void);
249 void kseq_balance(void *arg);
250 void kseq_move(struct kseq *from, int cpu);
251 #endif
252 
253 void
254 kseq_print(int cpu)
255 {
256 	struct kseq *kseq;
257 	int i;
258 
259 	kseq = KSEQ_CPU(cpu);
260 
261 	printf("kseq:\n");
262 	printf("\tload:           %d\n", kseq->ksq_load);
263 	printf("\tload ITHD:      %d\n", kseq->ksq_loads[PRI_ITHD]);
264 	printf("\tload REALTIME:  %d\n", kseq->ksq_loads[PRI_REALTIME]);
265 	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
266 	printf("\tload IDLE:      %d\n", kseq->ksq_loads[PRI_IDLE]);
267 	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
268 	printf("\tnice counts:\n");
269 	for (i = 0; i < PRIO_TOTAL + 1; i++)
270 		if (kseq->ksq_nice[i])
271 			printf("\t\t%d = %d\n",
272 			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
273 }
274 
275 static void
276 kseq_add(struct kseq *kseq, struct kse *ke)
277 {
278 	mtx_assert(&sched_lock, MA_OWNED);
279 	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
280 	kseq->ksq_load++;
281 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
282 		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
283 		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
284 		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
285 		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
286 	}
287 #ifdef SMP
288 	kseq->ksq_rslices += ke->ke_slice;
289 #endif
290 }
291 
292 static void
293 kseq_rem(struct kseq *kseq, struct kse *ke)
294 {
295 	mtx_assert(&sched_lock, MA_OWNED);
296 	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
297 	kseq->ksq_load--;
298 	ke->ke_runq = NULL;
299 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
300 		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
301 #ifdef SMP
302 	kseq->ksq_rslices -= ke->ke_slice;
303 #endif
304 }
305 
306 static void
307 kseq_nice_add(struct kseq *kseq, int nice)
308 {
309 	mtx_assert(&sched_lock, MA_OWNED);
310 	/* Normalize to zero. */
311 	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
312 	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
313 		kseq->ksq_nicemin = nice;
314 }
315 
316 static void
317 kseq_nice_rem(struct kseq *kseq, int nice)
318 {
319 	int n;
320 
321 	mtx_assert(&sched_lock, MA_OWNED);
322 	/* Normalize to zero. */
323 	n = nice + SCHED_PRI_NHALF;
324 	kseq->ksq_nice[n]--;
325 	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
326 
327 	/*
328 	 * If this wasn't the smallest nice value or there are more in
329 	 * this bucket we can just return.  Otherwise we have to recalculate
330 	 * the smallest nice.
331 	 */
332 	if (nice != kseq->ksq_nicemin ||
333 	    kseq->ksq_nice[n] != 0 ||
334 	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
335 		return;
336 
337 	for (; n < SCHED_PRI_NRESV + 1; n++)
338 		if (kseq->ksq_nice[n]) {
339 			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
340 			return;
341 		}
342 }
343 
344 #ifdef SMP
345 /*
346  * kseq_balance is a simple CPU load balancing algorithm.  It operates by
347  * finding the least loaded and most loaded cpu and equalizing their load
348  * by migrating some processes.
349  *
350  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
351  * installations will only have 2 cpus.  Secondly, load balancing too much at
352  * once can have an unpleasant effect on the system.  The scheduler rarely has
353  * enough information to make perfect decisions.  So this algorithm chooses
354  * simplicity and more gradual effects on load in larger systems.
355  *
356  * It could be improved by considering the priorities and slices assigned to
357  * each task prior to balancing them.  There are many pathological cases with
358  * any approach and so the semi-random algorithm below may work as well as any.
359  *
360  */
361 void
362 kseq_balance(void *arg)
363 {
364 	struct kseq *kseq;
365 	int high_load;
366 	int low_load;
367 	int high_cpu;
368 	int low_cpu;
369 	int move;
370 	int diff;
371 	int i;
372 
373 	high_cpu = 0;
374 	low_cpu = 0;
375 	high_load = 0;
376 	low_load = -1;
377 
378 	mtx_lock_spin(&sched_lock);
379 	if (smp_started == 0)
380 		goto out;
381 
382 	for (i = 0; i < mp_maxid; i++) {
383 		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
384 			continue;
385 		kseq = KSEQ_CPU(i);
386 		if (kseq->ksq_load > high_load) {
387 			high_load = kseq->ksq_load;
388 			high_cpu = i;
389 		}
390 		if (low_load == -1 || kseq->ksq_load < low_load) {
391 			low_load = kseq->ksq_load;
392 			low_cpu = i;
393 		}
394 	}
395 
396 	kseq = KSEQ_CPU(high_cpu);
397 
398 	/*
399 	 * Nothing to do.
400 	 */
401 	if (high_load < kseq->ksq_cpus + 1)
402 		goto out;
403 
404 	high_load -= kseq->ksq_cpus;
405 
406 	if (low_load >= high_load)
407 		goto out;
408 
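	/*
	 * Move roughly half of the imbalance, rounding up so that a
	 * difference of one still migrates one kse.
	 */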
409 	diff = high_load - low_load;
410 	move = diff / 2;
411 	if (diff & 0x1)
412 		move++;
413 
414 	for (i = 0; i < move; i++)
415 		kseq_move(kseq, low_cpu);
416 
417 out:
418 	mtx_unlock_spin(&sched_lock);
419 	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);
420 
421 	return;
422 }
423 
424 struct kseq *
425 kseq_load_highest(void)
426 {
427 	struct kseq *kseq;
428 	int load;
429 	int cpu;
430 	int i;
431 
432 	mtx_assert(&sched_lock, MA_OWNED);
433 	cpu = 0;
434 	load = 0;
435 
436 	for (i = 0; i < mp_maxid; i++) {
437 		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
438 			continue;
439 		kseq = KSEQ_CPU(i);
440 		if (kseq->ksq_load > load) {
441 			load = kseq->ksq_load;
442 			cpu = i;
443 		}
444 	}
445 	kseq = KSEQ_CPU(cpu);
446 
447 	if (load > kseq->ksq_cpus)
448 		return (kseq);
449 
450 	return (NULL);
451 }
452 
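/*
 * Callers must guarantee that the source kseq holds a runnable kse;
 * both call sites check the queue's load before calling kseq_move().
 */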
453 void
454 kseq_move(struct kseq *from, int cpu)
455 {
456 	struct kse *ke;
457 
458 	ke = kseq_choose(from);
459 	runq_remove(ke->ke_runq, ke);
460 	ke->ke_state = KES_THREAD;
461 	kseq_rem(from, ke);
462 
463 	ke->ke_cpu = cpu;
464 	sched_add(ke);
465 }
466 #endif
467 
468 struct kse *
469 kseq_choose(struct kseq *kseq)
470 {
471 	struct kse *ke;
472 	struct runq *swap;
473 
474 	mtx_assert(&sched_lock, MA_OWNED);
475 	swap = NULL;
476 
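	/*
	 * Timeshare kses are run from ksq_curr while expired slices are
	 * queued on ksq_next by sched_clock().  When the current queue
	 * empties the two queues swap, which bounds how long a requeued
	 * kse waits before running again.
	 */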
477 	for (;;) {
478 		ke = runq_choose(kseq->ksq_curr);
479 		if (ke == NULL) {
480 			/*
481 			 * We already swapped once and didn't get anywhere.
482 			 */
483 			if (swap)
484 				break;
485 			swap = kseq->ksq_curr;
486 			kseq->ksq_curr = kseq->ksq_next;
487 			kseq->ksq_next = swap;
488 			continue;
489 		}
490 		/*
491 		 * If we encounter a slice of 0 the kse is in a
492 		 * TIMESHARE kse group and its nice value was too far
493 		 * outside the range that receives slices.
494 		 */
495 		if (ke->ke_slice == 0) {
496 			runq_remove(ke->ke_runq, ke);
497 			sched_slice(ke);
498 			ke->ke_runq = kseq->ksq_next;
499 			runq_add(ke->ke_runq, ke);
500 			continue;
501 		}
502 		return (ke);
503 	}
504 
505 	return (runq_choose(&kseq->ksq_idle));
506 }
507 
508 static void
509 kseq_setup(struct kseq *kseq)
510 {
511 	runq_init(&kseq->ksq_timeshare[0]);
512 	runq_init(&kseq->ksq_timeshare[1]);
513 	runq_init(&kseq->ksq_idle);
514 
515 	kseq->ksq_curr = &kseq->ksq_timeshare[0];
516 	kseq->ksq_next = &kseq->ksq_timeshare[1];
517 
518 	kseq->ksq_loads[PRI_ITHD] = 0;
519 	kseq->ksq_loads[PRI_REALTIME] = 0;
520 	kseq->ksq_loads[PRI_TIMESHARE] = 0;
521 	kseq->ksq_loads[PRI_IDLE] = 0;
522 	kseq->ksq_load = 0;
523 #ifdef SMP
524 	kseq->ksq_rslices = 0;
525 #endif
526 }
527 
528 static void
529 sched_setup(void *dummy)
530 {
531 #ifdef SMP
532 	int i;
533 #endif
534 
535 	slice_min = (hz/100);	/* 10ms */
536 	slice_max = (hz/7);	/* ~140ms */
537 
538 #ifdef SMP
539 	/* init kseqs */
540 	/* Create the idmap. */
541 #ifdef ULE_HTT_EXPERIMENTAL
542 	if (smp_topology == NULL) {
543 #else
544 	if (1) {
545 #endif
546 		for (i = 0; i < MAXCPU; i++) {
547 			kseq_setup(&kseq_cpu[i]);
548 			kseq_idmap[i] = &kseq_cpu[i];
549 			kseq_cpu[i].ksq_cpus = 1;
550 		}
551 	} else {
552 		int j;
553 
554 		for (i = 0; i < smp_topology->ct_count; i++) {
555 			struct cpu_group *cg;
556 
557 			cg = &smp_topology->ct_group[i];
558 			kseq_setup(&kseq_cpu[i]);
559 
560 			for (j = 0; j < MAXCPU; j++)
561 				if ((cg->cg_mask & (1 << j)) != 0)
562 					kseq_idmap[j] = &kseq_cpu[i];
563 			kseq_cpu[i].ksq_cpus = cg->cg_count;
564 		}
565 	}
566 	callout_init(&kseq_lb_callout, 1);
567 	kseq_balance(NULL);
568 #else
569 	kseq_setup(KSEQ_SELF());
570 #endif
571 	mtx_lock_spin(&sched_lock);
572 	kseq_add(KSEQ_SELF(), &kse0);
573 	mtx_unlock_spin(&sched_lock);
574 }
575 
576 /*
577  * Scale the scheduling priority according to the "interactivity" of this
578  * process.
579  */
580 static void
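/*
 * A fully interactive kseg (score 0) with nice 0 lands at
 * PRI_MIN_TIMESHARE; each point of interactivity score then costs
 * SCHED_PRI_RANGE / SCHED_INTERACT_MAX priority levels.
 */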
581 sched_priority(struct ksegrp *kg)
582 {
583 	int pri;
584 
585 	if (kg->kg_pri_class != PRI_TIMESHARE)
586 		return;
587 
588 	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
589 	pri += SCHED_PRI_BASE;
590 	pri += kg->kg_nice;
591 
592 	if (pri > PRI_MAX_TIMESHARE)
593 		pri = PRI_MAX_TIMESHARE;
594 	else if (pri < PRI_MIN_TIMESHARE)
595 		pri = PRI_MIN_TIMESHARE;
596 
597 	kg->kg_user_pri = pri;
598 
599 	return;
600 }
601 
602 /*
603  * Calculate a time slice based on the properties of the kseg and the runq
604  * that we're on.  This is only for PRI_TIMESHARE ksegrps.
605  */
606 static void
607 sched_slice(struct kse *ke)
608 {
609 	struct kseq *kseq;
610 	struct ksegrp *kg;
611 
612 	kg = ke->ke_ksegrp;
613 	kseq = KSEQ_CPU(ke->ke_cpu);
614 
615 	/*
616 	 * Rationale:
617 	 * KSEs in interactive ksegs get the minimum slice so that we
618 	 * quickly notice if one abuses its advantage.
619 	 *
620 	 * KSEs in non-interactive ksegs are assigned a slice that is
621 	 * based on the kseg's nice value relative to the least nice kseg
622 	 * on the run queue for this cpu.
623 	 *
624 	 * If the KSE is less nice than all others it gets the maximum
625 	 * slice and other KSEs will adjust their slice relative to
626 	 * this when they first expire.
627 	 *
628 	 * There is a 20 point window that starts relative to the least
629 	 * nice kse on the run queue.  Slice size is determined by
630 	 * the kse's distance from the least nice ksegrp.
631 	 *
632 	 * If you are outside of the window you will get no slice and
633 	 * you will be reevaluated each time you are selected on the
634 	 * run queue.
635 	 *
636 	 */
637 
638 	if (!SCHED_INTERACTIVE(kg)) {
639 		int nice;
640 
641 		nice = kg->kg_nice - kseq->ksq_nicemin;
642 		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
643 		    kg->kg_nice < kseq->ksq_nicemin)
644 			ke->ke_slice = SCHED_SLICE_MAX;
645 		else if (nice <= SCHED_PRI_NTHRESH)
646 			ke->ke_slice = SCHED_SLICE_NICE(nice);
647 		else
648 			ke->ke_slice = 0;
649 	} else
650 		ke->ke_slice = SCHED_SLICE_MIN;
651 
652 	CTR6(KTR_ULE,
653 	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
654 	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
655 	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));
656 
657 	/*
658 	 * Check to see if we need to scale back the slp and run time
659 	 * in the kg.  This will cause us to forget old interactivity
660 	 * while maintaining the current ratio.
661 	 */
662 	sched_interact_update(kg);
663 
664 	return;
665 }
666 
667 static void
668 sched_interact_update(struct ksegrp *kg)
669 {
670 	/* XXX Fixme, use a linear algorithm and not a while loop. */
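	/*
	 * Each pass scales runtime and slptime to 4/5 of their former
	 * value, decaying their magnitude while roughly preserving the
	 * ratio that determines the interactivity score.
	 */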
671 	while ((kg->kg_runtime + kg->kg_slptime) >  SCHED_SLP_RUN_MAX) {
672 		kg->kg_runtime = (kg->kg_runtime / 5) * 4;
673 		kg->kg_slptime = (kg->kg_slptime / 5) * 4;
674 	}
675 }
676 
677 static int
678 sched_interact_score(struct ksegrp *kg)
679 {
680 	int div;
681 
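	/*
	 * Scores run from 0 (most interactive) to SCHED_INTERACT_MAX.
	 * e.g. a kseg that has slept three times as long as it has run
	 * scores roughly SCHED_INTERACT_HALF * runtime / slptime ~= 16,
	 * under SCHED_INTERACT_THRESH.
	 */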
682 	if (kg->kg_runtime > kg->kg_slptime) {
683 		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
684 		return (SCHED_INTERACT_HALF +
685 		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
686 	} else if (kg->kg_slptime > kg->kg_runtime) {
687 		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
688 		return (kg->kg_runtime / div);
689 	}
690 
691 	/*
692 	 * This can happen if slptime and runtime are 0.
693 	 */
694 	return (0);
695 
696 }
697 
698 /*
699  * This is only somewhat accurate since, given many processes of the same
700  * priority, they will switch when their slices run out, which will be
701  * at most SCHED_SLICE_MAX.
702  */
703 int
704 sched_rr_interval(void)
705 {
706 	return (SCHED_SLICE_MAX);
707 }
708 
709 void
710 sched_pctcpu_update(struct kse *ke)
711 {
712 	/*
713 	 * Adjust counters and watermark for pctcpu calc.
714 	 */
715 
716 	/*
717 	 * Shift the tick count out so that the divide doesn't round away
718 	 * our results.
719 	 */
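	/*
	 * The divisor below is the width of the sampling window;
	 * sched_fork_kse() opens it hz ticks wide and this routine
	 * re-opens it to SCHED_CPU_TICKS, so in practice it is nonzero.
	 */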
720 	ke->ke_ticks <<= 10;
721 	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
722 		    SCHED_CPU_TICKS;
723 	ke->ke_ticks >>= 10;
724 	ke->ke_ltick = ticks;
725 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
726 }
727 
728 #ifdef SMP
729 /* XXX Should be changed to kseq_load_lowest() */
730 int
731 sched_pickcpu(void)
732 {
733 	struct kseq *kseq;
734 	int load;
735 	int cpu;
736 	int i;
737 
738 	mtx_assert(&sched_lock, MA_OWNED);
739 	if (!smp_started)
740 		return (0);
741 
742 	load = -1;
743 	cpu = 0;
744 
745 	for (i = 0; i < mp_maxid; i++) {
746 		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
747 			continue;
748 		kseq = KSEQ_CPU(i);
749 		if (load == -1 || kseq->ksq_load < load) {
750 			cpu = i;
751 			load = kseq->ksq_load;
752 		}
753 	}
754 
755 	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
756 	return (cpu);
757 }
758 #else
759 int
760 sched_pickcpu(void)
761 {
762 	return (0);
763 }
764 #endif
765 
766 void
767 sched_prio(struct thread *td, u_char prio)
768 {
769 	struct kse *ke;
770 	struct runq *rq;
771 
772 	mtx_assert(&sched_lock, MA_OWNED);
773 	ke = td->td_kse;
774 	td->td_priority = prio;
775 
776 	if (TD_ON_RUNQ(td)) {
777 		rq = ke->ke_runq;
778 
779 		runq_remove(rq, ke);
780 		runq_add(rq, ke);
781 	}
782 }
783 
784 void
785 sched_switchout(struct thread *td)
786 {
787 	struct kse *ke;
788 
789 	mtx_assert(&sched_lock, MA_OWNED);
790 
791 	ke = td->td_kse;
792 
793 	td->td_last_kse = ke;
794 	td->td_lastcpu = td->td_oncpu;
795 	td->td_oncpu = NOCPU;
796 	td->td_flags &= ~TDF_NEEDRESCHED;
797 
798 	if (TD_IS_RUNNING(td)) {
799 		/*
800 		 * This queue is always correct except for idle threads which
801 		 * have a higher priority due to priority propagation.
802 		 */
803 		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
804 		    ke->ke_thread->td_priority > PRI_MIN_IDLE)
805 			ke->ke_runq = KSEQ_SELF()->ksq_curr;
806 		runq_add(ke->ke_runq, ke);
807 		/* setrunqueue(td); */
808 		return;
809 	}
810 	if (ke->ke_runq)
811 		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
812 	/*
813 	 * We will not be on the run queue. So we must be
814 	 * sleeping or similar.
815 	 */
816 	if (td->td_proc->p_flag & P_SA)
817 		kse_reassign(ke);
818 }
819 
820 void
821 sched_switchin(struct thread *td)
822 {
823 	/* struct kse *ke = td->td_kse; */
824 	mtx_assert(&sched_lock, MA_OWNED);
825 
826 	td->td_oncpu = PCPU_GET(cpuid);
827 }
828 
829 void
830 sched_nice(struct ksegrp *kg, int nice)
831 {
832 	struct kse *ke;
833 	struct thread *td;
834 	struct kseq *kseq;
835 
836 	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
837 	mtx_assert(&sched_lock, MA_OWNED);
838 	/*
839 	 * We need to adjust the nice counts for running KSEs.
840 	 */
841 	if (kg->kg_pri_class == PRI_TIMESHARE)
842 		FOREACH_KSE_IN_GROUP(kg, ke) {
843 			if (ke->ke_runq == NULL)
844 				continue;
845 			kseq = KSEQ_CPU(ke->ke_cpu);
846 			kseq_nice_rem(kseq, kg->kg_nice);
847 			kseq_nice_add(kseq, nice);
848 		}
849 	kg->kg_nice = nice;
850 	sched_priority(kg);
851 	FOREACH_THREAD_IN_GROUP(kg, td)
852 		td->td_flags |= TDF_NEEDRESCHED;
853 }
854 
855 void
856 sched_sleep(struct thread *td, u_char prio)
857 {
858 	mtx_assert(&sched_lock, MA_OWNED);
859 
860 	td->td_slptime = ticks;
861 	td->td_priority = prio;
862 
863 	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
864 	    td->td_kse, td->td_slptime);
865 }
866 
867 void
868 sched_wakeup(struct thread *td)
869 {
870 	mtx_assert(&sched_lock, MA_OWNED);
871 
872 	/*
873 	 * Let the kseg know how long we slept for.  This is because process
874 	 * interactivity behavior is modeled in the kseg.
875 	 */
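	/*
	 * Both slptime and runtime are kept scaled by 2^10 so the
	 * interactivity ratio retains sub-tick precision.
	 */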
876 	if (td->td_slptime) {
877 		struct ksegrp *kg;
878 		int hzticks;
879 
880 		kg = td->td_ksegrp;
881 		hzticks = ticks - td->td_slptime;
882 		kg->kg_slptime += hzticks << 10;
883 		sched_interact_update(kg);
884 		sched_priority(kg);
885 		if (td->td_kse)
886 			sched_slice(td->td_kse);
887 		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
888 		    td->td_kse, hzticks);
889 		td->td_slptime = 0;
890 	}
891 	setrunqueue(td);
892 	if (td->td_priority < curthread->td_priority)
893 		curthread->td_flags |= TDF_NEEDRESCHED;
894 }
895 
896 /*
897  * Penalize the parent for creating a new child and initialize the child's
898  * priority.
899  */
900 void
901 sched_fork(struct proc *p, struct proc *p1)
902 {
903 
904 	mtx_assert(&sched_lock, MA_OWNED);
905 
906 	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
907 	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
908 	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
909 }
910 
911 void
912 sched_fork_kse(struct kse *ke, struct kse *child)
913 {
914 
915 	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
916 	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
917 	child->ke_runq = NULL;
918 
919 	/*
920 	 * Claim that we've been running for one second for statistical
921 	 * purposes.
922 	 */
923 	child->ke_ticks = 0;
924 	child->ke_ltick = ticks;
925 	child->ke_ftick = ticks - hz;
926 }
927 
928 void
929 sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
930 {
931 
932 	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
933 	/* XXX Need something better here */
934 
935 	child->kg_slptime = kg->kg_slptime / SCHED_SLP_RUN_THROTTLE;
936 	child->kg_runtime = kg->kg_runtime / SCHED_SLP_RUN_THROTTLE;
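	/*
	 * With SCHED_SLP_RUN_THROTTLE at 100 the child keeps only 1% of
	 * the parent's sleep and run history and must earn an
	 * interactive score of its own.
	 */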
937 	kg->kg_runtime += tickincr << 10;
938 	sched_interact_update(kg);
939 
940 	child->kg_user_pri = kg->kg_user_pri;
941 	child->kg_nice = kg->kg_nice;
942 }
943 
944 void
945 sched_fork_thread(struct thread *td, struct thread *child)
946 {
947 }
948 
949 void
950 sched_class(struct ksegrp *kg, int class)
951 {
952 	struct kseq *kseq;
953 	struct kse *ke;
954 
955 	mtx_assert(&sched_lock, MA_OWNED);
956 	if (kg->kg_pri_class == class)
957 		return;
958 
959 	FOREACH_KSE_IN_GROUP(kg, ke) {
960 		if (ke->ke_state != KES_ONRUNQ &&
961 		    ke->ke_state != KES_THREAD)
962 			continue;
963 		kseq = KSEQ_CPU(ke->ke_cpu);
964 
965 		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
966 		kseq->ksq_loads[PRI_BASE(class)]++;
967 
968 		if (kg->kg_pri_class == PRI_TIMESHARE)
969 			kseq_nice_rem(kseq, kg->kg_nice);
970 		else if (class == PRI_TIMESHARE)
971 			kseq_nice_add(kseq, kg->kg_nice);
972 	}
973 
974 	kg->kg_pri_class = class;
975 }
976 
977 /*
978  * Return some of the child's priority and interactivity to the parent.
979  */
980 void
981 sched_exit(struct proc *p, struct proc *child)
982 {
983 	/* XXX Need something better here */
984 	mtx_assert(&sched_lock, MA_OWNED);
985 	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
986 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
987 }
988 
989 void
990 sched_exit_kse(struct kse *ke, struct kse *child)
991 {
992 	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
993 }
994 
995 void
996 sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
997 {
998 	/* kg->kg_slptime += child->kg_slptime; */
999 	kg->kg_runtime += child->kg_runtime;
1000 	sched_interact_update(kg);
1001 }
1002 
1003 void
1004 sched_exit_thread(struct thread *td, struct thread *child)
1005 {
1006 }
1007 
1008 void
1009 sched_clock(struct kse *ke)
1010 {
1011 	struct kseq *kseq;
1012 	struct ksegrp *kg;
1013 	struct thread *td;
1014 #if 0
1015 	struct kse *nke;
1016 #endif
1017 
1018 	/*
1019 	 * sched_setup() apparently happens prior to stathz being set.  We
1020 	 * need to resolve the timers earlier in the boot so we can avoid
1021 	 * calculating this here.
1022 	 */
1023 	if (realstathz == 0) {
1024 		realstathz = stathz ? stathz : hz;
1025 		tickincr = hz / realstathz;
1026 		/*
1027 		 * XXX This does not work for values of stathz that are much
1028 		 * larger than hz.
1029 		 */
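		/*
		 * e.g. hz = 1000 and stathz = 128 give tickincr = 7,
		 * slightly under-charging each stat clock tick; stathz
		 * greater than hz truncates to 0 and is clamped to 1
		 * below, over-charging instead.
		 */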
1030 		if (tickincr == 0)
1031 			tickincr = 1;
1032 	}
1033 
1034 	td = ke->ke_thread;
1035 	kg = ke->ke_ksegrp;
1036 
1037 	mtx_assert(&sched_lock, MA_OWNED);
1038 	KASSERT((td != NULL), ("schedclock: null thread pointer"));
1039 
1040 	/* Adjust ticks for pctcpu */
1041 	ke->ke_ticks++;
1042 	ke->ke_ltick = ticks;
1043 
1044 	/* Go up to one second beyond our max and then trim back down */
1045 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1046 		sched_pctcpu_update(ke);
1047 
1048 	if (td->td_flags & TDF_IDLETD)
1049 		return;
1050 
1051 	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1052 	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1053 
1054 	/*
1055 	 * We only run the slicing code for TIMESHARE ksegrps.
1056 	 */
1057 	if (kg->kg_pri_class != PRI_TIMESHARE)
1058 		return;
1059 	/*
1060 	 * Check for a higher priority task on the run queue.  This can happen
1061 	 * on SMP if another processor woke up a process on our runq.
1062 	 */
1063 	kseq = KSEQ_SELF();
1064 #if 0
1065 	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
1066 		if (sched_strict &&
1067 		    nke->ke_thread->td_priority < td->td_priority)
1068 			td->td_flags |= TDF_NEEDRESCHED;
1069 		else if (nke->ke_thread->td_priority <
1070 		    td->td_priority - SCHED_PRIO_SLOP)
1071 			td->td_flags |= TDF_NEEDRESCHED;
1072 	}
1075 #endif
1076 	/*
1077 	 * We used a tick; charge it to the ksegrp so that we can compute our
1078 	 * interactivity.
1079 	 */
1080 	kg->kg_runtime += tickincr << 10;
1081 	sched_interact_update(kg);
1082 
1083 	/*
1084 	 * We used up one time slice.
1085 	 */
1086 	ke->ke_slice--;
1087 #ifdef SMP
1088 	kseq->ksq_rslices--;
1089 #endif
1090 
1091 	if (ke->ke_slice > 0)
1092 		return;
1093 	/*
1094 	 * We're out of time, recompute priorities and requeue.
1095 	 */
1096 	kseq_rem(kseq, ke);
1097 	sched_priority(kg);
1098 	sched_slice(ke);
1099 	if (SCHED_CURR(kg, ke))
1100 		ke->ke_runq = kseq->ksq_curr;
1101 	else
1102 		ke->ke_runq = kseq->ksq_next;
1103 	kseq_add(kseq, ke);
1104 	td->td_flags |= TDF_NEEDRESCHED;
1105 }
1106 
1107 int
1108 sched_runnable(void)
1109 {
1110 	struct kseq *kseq;
1111 	int load;
1112 
1113 	load = 1;
1114 
1115 	mtx_lock_spin(&sched_lock);
1116 	kseq = KSEQ_SELF();
1117 
1118 	if (kseq->ksq_load)
1119 		goto out;
1120 #ifdef SMP
1121 	/*
1122 	 * For SMP we may steal another processor's KSEs.  Just search until we
1123 	 * verify that at least one other cpu has a runnable task.
1124 	 */
1125 	if (smp_started) {
1126 		int i;
1127 
1128 		for (i = 0; i < mp_maxid; i++) {
1129 			if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
1130 				continue;
1131 			kseq = KSEQ_CPU(i);
1132 			if (kseq->ksq_load > kseq->ksq_cpus)
1133 				goto out;
1134 		}
1135 	}
1136 #endif
1137 	load = 0;
1138 out:
1139 	mtx_unlock_spin(&sched_lock);
1140 	return (load);
1141 }
1142 
1143 void
1144 sched_userret(struct thread *td)
1145 {
1146 	struct ksegrp *kg;
1147 	struct kseq *kseq;
1148 	struct kse *ke;
1149 
1150 	kg = td->td_ksegrp;
1151 
1152 	if (td->td_priority != kg->kg_user_pri) {
1153 		mtx_lock_spin(&sched_lock);
1154 		td->td_priority = kg->kg_user_pri;
1155 		kseq = KSEQ_SELF();
1156 		if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
1157 #ifdef SMP
1158 		    kseq->ksq_load > kseq->ksq_cpus &&
1159 #else
1160 		    kseq->ksq_load > 1 &&
1161 #endif
1162 		    (ke = kseq_choose(kseq)) != NULL &&
1163 		    ke->ke_thread->td_priority < td->td_priority)
1164 			curthread->td_flags |= TDF_NEEDRESCHED;
1165 		mtx_unlock_spin(&sched_lock);
1166 	}
1167 }
1168 
1169 struct kse *
1170 sched_choose(void)
1171 {
1172 	struct kseq *kseq;
1173 	struct kse *ke;
1174 
1175 	mtx_assert(&sched_lock, MA_OWNED);
1176 #ifdef SMP
1177 retry:
1178 #endif
1179 	kseq = KSEQ_SELF();
1180 	ke = kseq_choose(kseq);
1181 	if (ke) {
1182 		runq_remove(ke->ke_runq, ke);
1183 		ke->ke_state = KES_THREAD;
1184 
1185 		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1186 			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1187 			    ke, ke->ke_runq, ke->ke_slice,
1188 			    ke->ke_thread->td_priority);
1189 		}
1190 		return (ke);
1191 	}
1192 
1193 #ifdef SMP
1194 	if (smp_started) {
1195 		/*
1196 		 * Find the cpu with the highest load and steal one proc.
1197 		 */
1198 		if ((kseq = kseq_load_highest()) == NULL)
1199 			return (NULL);
1200 
1201 		/*
1202 		 * Remove this kse from this kseq and runq and then requeue
1203 		 * on the current processor.  Then we will dequeue it
1204 		 * normally above.
1205 		 */
1206 		kseq_move(kseq, PCPU_GET(cpuid));
1207 		goto retry;
1208 	}
1209 #endif
1210 
1211 	return (NULL);
1212 }
1213 
1214 void
1215 sched_add(struct kse *ke)
1216 {
1217 	struct kseq *kseq;
1218 	struct ksegrp *kg;
1219 
1220 	mtx_assert(&sched_lock, MA_OWNED);
1221 	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
1222 	KASSERT((ke->ke_thread->td_kse != NULL),
1223 	    ("sched_add: No KSE on thread"));
1224 	KASSERT(ke->ke_state != KES_ONRUNQ,
1225 	    ("sched_add: kse %p (%s) already in run queue", ke,
1226 	    ke->ke_proc->p_comm));
1227 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1228 	    ("sched_add: process swapped out"));
1229 	KASSERT(ke->ke_runq == NULL,
1230 	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1231 
1232 	kg = ke->ke_ksegrp;
1233 
1234 	switch (PRI_BASE(kg->kg_pri_class)) {
1235 	case PRI_ITHD:
1236 	case PRI_REALTIME:
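		/*
		 * Interrupt and realtime threads always start on this
		 * cpu's current queue with a full slice.
		 */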
1237 		kseq = KSEQ_SELF();
1238 		ke->ke_runq = kseq->ksq_curr;
1239 		ke->ke_slice = SCHED_SLICE_MAX;
1240 		ke->ke_cpu = PCPU_GET(cpuid);
1241 		break;
1242 	case PRI_TIMESHARE:
1243 		kseq = KSEQ_CPU(ke->ke_cpu);
1244 		if (SCHED_CURR(kg, ke))
1245 			ke->ke_runq = kseq->ksq_curr;
1246 		else
1247 			ke->ke_runq = kseq->ksq_next;
1248 		break;
1249 	case PRI_IDLE:
1250 		kseq = KSEQ_CPU(ke->ke_cpu);
1251 		/*
1252 		 * This is for priority prop.
1253 		 */
1254 		if (ke->ke_thread->td_priority > PRI_MIN_IDLE)
1255 			ke->ke_runq = kseq->ksq_curr;
1256 		else
1257 			ke->ke_runq = &kseq->ksq_idle;
1258 		ke->ke_slice = SCHED_SLICE_MIN;
1259 		break;
1260 	default:
1261 		panic("Unknown pri class.\n");
1262 		break;
1263 	}
1264 
1265 	ke->ke_ksegrp->kg_runq_kses++;
1266 	ke->ke_state = KES_ONRUNQ;
1267 
1268 	runq_add(ke->ke_runq, ke);
1269 	kseq_add(kseq, ke);
1270 }
1271 
1272 void
1273 sched_rem(struct kse *ke)
1274 {
1275 	struct kseq *kseq;
1276 
1277 	mtx_assert(&sched_lock, MA_OWNED);
1278 	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
1279 
1280 	ke->ke_state = KES_THREAD;
1281 	ke->ke_ksegrp->kg_runq_kses--;
1282 	kseq = KSEQ_CPU(ke->ke_cpu);
1283 	runq_remove(ke->ke_runq, ke);
1284 	kseq_rem(kseq, ke);
1285 }
1286 
1287 fixpt_t
1288 sched_pctcpu(struct kse *ke)
1289 {
1290 	fixpt_t pctcpu;
1291 
1292 	pctcpu = 0;
1293 
1294 	mtx_lock_spin(&sched_lock);
1295 	if (ke->ke_ticks) {
1296 		int rtick;
1297 
1298 		/*
1299 		 * Don't update more frequently than twice a second.  Allowing
1300 		 * this causes the cpu usage to decay away too quickly due to
1301 		 * rounding errors.
1302 		 */
1303 		if (ke->ke_ltick < (ticks - (hz / 2)))
1304 			sched_pctcpu_update(ke);
1305 
1306 		/* How many rticks per second? */
1307 		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1308 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
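		/*
		 * A kse that ran on every stat tick of the window has
		 * rtick == realstathz and thus pctcpu == FSCALE (100%).
		 */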
1309 	}
1310 
1311 	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1312 	mtx_unlock_spin(&sched_lock);
1313 
1314 	return (pctcpu);
1315 }
1316 
1317 int
1318 sched_sizeof_kse(void)
1319 {
1320 	return (sizeof(struct kse) + sizeof(struct ke_sched));
1321 }
1322 
1323 int
1324 sched_sizeof_ksegrp(void)
1325 {
1326 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1327 }
1328 
1329 int
1330 sched_sizeof_proc(void)
1331 {
1332 	return (sizeof(struct proc));
1333 }
1334 
1335 int
1336 sched_sizeof_thread(void)
1337 {
1338 	return (sizeof(struct thread) + sizeof(struct td_sched));
1339 }
1340