xref: /freebsd/sys/kern/sched_ule.c (revision b41f3d22cc72d1fbccb742d13e02ba12d7d18af8)
1 /*-
2  * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/ktr.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/proc.h>
37 #include <sys/resource.h>
38 #include <sys/sched.h>
39 #include <sys/smp.h>
40 #include <sys/sx.h>
41 #include <sys/sysctl.h>
42 #include <sys/sysproto.h>
43 #include <sys/vmmeter.h>
44 #ifdef DDB
45 #include <ddb/ddb.h>
46 #endif
47 #ifdef KTRACE
48 #include <sys/uio.h>
49 #include <sys/ktrace.h>
50 #endif
51 
52 #include <machine/cpu.h>
53 
54 #define KTR_ULE         KTR_NFS
55 
56 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
57 /* XXX This is bogus compatibility crap for ps */
58 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
59 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
60 
61 static void sched_setup(void *dummy);
62 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
63 
64 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");
65 
66 static int sched_strict;
67 SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");
68 
69 static int slice_min = 1;
70 SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
71 
72 static int slice_max = 10;
73 SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
74 
75 int realstathz;
76 int tickincr = 1;
77 
78 #ifdef SMP
79 /* Callout to handle load balancing SMP systems. */
80 static struct callout kseq_lb_callout;
81 #endif
82 
83 /*
84  * These data structures are allocated within their parent data structure but
85  * are scheduler specific.
86  */
87 
88 struct ke_sched {
89 	int		ske_slice;
90 	struct runq	*ske_runq;
91 	/* The following variables are only used for pctcpu calculation */
92 	int		ske_ltick;	/* Last tick that we were running on */
93 	int		ske_ftick;	/* First tick that we were running on */
94 	int		ske_ticks;	/* Tick count */
95 	/* CPU that we have affinity for. */
96 	u_char		ske_cpu;
97 };
98 #define	ke_slice	ke_sched->ske_slice
99 #define	ke_runq		ke_sched->ske_runq
100 #define	ke_ltick	ke_sched->ske_ltick
101 #define	ke_ftick	ke_sched->ske_ftick
102 #define	ke_ticks	ke_sched->ske_ticks
103 #define	ke_cpu		ke_sched->ske_cpu
104 
105 struct kg_sched {
106 	int	skg_slptime;		/* Number of ticks we voluntarily slept */
107 	int	skg_runtime;		/* Number of ticks we were running */
108 };
109 #define	kg_slptime	kg_sched->skg_slptime
110 #define	kg_runtime	kg_sched->skg_runtime
111 
112 struct td_sched {
113 	int	std_slptime;
114 };
115 #define	td_slptime	td_sched->std_slptime
116 
117 struct td_sched td_sched;
118 struct ke_sched ke_sched;
119 struct kg_sched kg_sched;
120 
121 struct ke_sched *kse0_sched = &ke_sched;
122 struct kg_sched *ksegrp0_sched = &kg_sched;
123 struct p_sched *proc0_sched = NULL;
124 struct td_sched *thread0_sched = &td_sched;
125 
126 /*
127  * This priority range has 20 priorities on either end that are reachable
128  * only through nice values.
129  *
130  * PRI_RANGE:	Total priority range for timeshare threads.
131  * PRI_NRESV:	Reserved priorities for nice.
132  * PRI_BASE:	The start of the dynamic range.
133  * DYN_RANGE:	Number of priorities that are available in the dynamic
134  *		priority range.
135  */
136 #define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
137 #define	SCHED_PRI_NRESV		PRIO_TOTAL
138 #define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
139 #define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
140 #define	SCHED_PRI_BASE		((SCHED_PRI_NRESV / 2) + PRI_MIN_TIMESHARE)
141 #define	SCHED_DYN_RANGE		(SCHED_PRI_RANGE - SCHED_PRI_NRESV)
142 #define	SCHED_PRI_INTERACT(score)					\
143     ((score) * SCHED_DYN_RANGE / SCHED_INTERACT_MAX)
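
/*
 * Illustrative arithmetic for the priority macros above, assuming the
 * timeshare range spans 64 priorities and nice runs from -20 to +20
 * (PRIO_TOTAL == 40):
 *
 *	SCHED_PRI_RANGE   = 64
 *	SCHED_PRI_NRESV   = 40, SCHED_PRI_NHALF = 20, SCHED_PRI_NTHRESH = 19
 *	SCHED_DYN_RANGE   = 64 - 40 = 24
 *	SCHED_PRI_BASE    = PRI_MIN_TIMESHARE + 20
 *
 * SCHED_PRI_INTERACT() then maps an interactivity score of 0 to an offset
 * of 0, a score of 50 to 12 and the worst score of 100 to 24, which
 * sched_priority() adds to SCHED_PRI_BASE along with the nice value.
 */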
144 
145 /*
146  * These determine the interactivity of a process.
147  *
148  * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
149  *		before throttling back.
150  * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time.
151  * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
152  * INTERACT_THRESH:	Threshold for placement on the current runq.
153  */
154 #define	SCHED_SLP_RUN_MAX	((hz / 10) << 10)
155 #define	SCHED_SLP_RUN_THROTTLE	(10)
156 #define	SCHED_INTERACT_MAX	(100)
157 #define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
158 #define	SCHED_INTERACT_THRESH	(20)
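
/*
 * Throttling example (see sched_slice() and sched_exit_ksegrp()): once
 * kg_slptime + kg_runtime grows past SCHED_SLP_RUN_MAX, both are divided
 * by SCHED_SLP_RUN_THROTTLE, so e.g. 81920 and 20480 become 8192 and 2048.
 * The 4:1 sleep/run ratio, and therefore the interactivity score, is
 * preserved while older history is forgotten.
 */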
159 
160 /*
161  * These parameters and macros determine the size of the time slice that is
162  * granted to each thread.
163  *
164  * SLICE_MIN:	Minimum time slice granted, in units of ticks.
165  * SLICE_MAX:	Maximum time slice granted.
166  * SLICE_RANGE:	Range of available time slices scaled by hz.
167  * SLICE_SCALE:	The number of slices granted per unit of val in [0, max].
168  * SLICE_NICE:  Determines the slice granted for a given scaled nice value.
169  */
170 #define	SCHED_SLICE_MIN			(slice_min)
171 #define	SCHED_SLICE_MAX			(slice_max)
172 #define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
173 #define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
174 #define	SCHED_SLICE_NICE(nice)						\
175     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))
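
/*
 * Worked example, assuming hz = 1000 so that sched_setup() leaves
 * slice_min = 10 and slice_max = 100, and assuming SCHED_PRI_NTHRESH = 19:
 *
 *	SCHED_SLICE_RANGE    = 100 - 10 + 1 = 91
 *	SCHED_SLICE_NICE(0)  = 100 - (0 * 91) / 19  = 100 ticks
 *	SCHED_SLICE_NICE(5)  = 100 - (5 * 91) / 19  = 77 ticks
 *	SCHED_SLICE_NICE(19) = 100 - (19 * 91) / 19 = 9 ticks
 *
 * The nice passed in is the distance from the least nice ksegrp on the
 * run queue (see sched_slice()), not the raw nice value.
 */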
176 
177 /*
178  * This macro determines whether or not the kse belongs on the current or
179  * next run queue.
180  *
181  * XXX nice value should affect how interactive a kg is.
182  */
183 #define	SCHED_INTERACTIVE(kg)						\
184     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
185 #define	SCHED_CURR(kg, ke)						\
186     (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))
187 
188 /*
189  * Cpu percentage computation macros and defines.
190  *
191  * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
192  * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
193  */
194 
195 #define	SCHED_CPU_TIME	10
196 #define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
197 
198 /*
199  * kseq - per processor runqs and statistics.
200  */
201 
202 #define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */
203 
204 struct kseq {
205 	struct runq	ksq_idle;		/* Queue of IDLE threads. */
206 	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
207 	struct runq	*ksq_next;		/* Next timeshare queue. */
208 	struct runq	*ksq_curr;		/* Current queue. */
209 	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
210 	int		ksq_load;		/* Aggregate load. */
211 	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
212 	short		ksq_nicemin;		/* Least nice. */
213 #ifdef SMP
214 	unsigned int	ksq_rslices;	/* Slices on run queue */
215 #endif
216 };
217 
218 /*
219  * One kse queue per processor.
220  */
221 #ifdef SMP
222 struct kseq	kseq_cpu[MAXCPU];
223 #define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
224 #define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
225 #else
226 struct kseq	kseq_cpu;
227 #define	KSEQ_SELF()	(&kseq_cpu)
228 #define	KSEQ_CPU(x)	(&kseq_cpu)
229 #endif
230 
231 static void sched_slice(struct kse *ke);
232 static void sched_priority(struct ksegrp *kg);
233 static int sched_interact_score(struct ksegrp *kg);
234 void sched_pctcpu_update(struct kse *ke);
235 int sched_pickcpu(void);
236 
237 /* Operations on per processor queues */
238 static struct kse * kseq_choose(struct kseq *kseq);
239 static void kseq_setup(struct kseq *kseq);
240 static void kseq_add(struct kseq *kseq, struct kse *ke);
241 static void kseq_rem(struct kseq *kseq, struct kse *ke);
242 static void kseq_nice_add(struct kseq *kseq, int nice);
243 static void kseq_nice_rem(struct kseq *kseq, int nice);
244 void kseq_print(int cpu);
245 #ifdef SMP
246 struct kseq * kseq_load_highest(void);
247 void kseq_balance(void *arg);
248 void kseq_move(struct kseq *from, int cpu);
249 #endif
250 
251 void
252 kseq_print(int cpu)
253 {
254 	struct kseq *kseq;
255 	int i;
256 
257 	kseq = KSEQ_CPU(cpu);
258 
259 	printf("kseq:\n");
260 	printf("\tload:           %d\n", kseq->ksq_load);
261 	printf("\tload ITHD:      %d\n", kseq->ksq_loads[PRI_ITHD]);
262 	printf("\tload REALTIME:  %d\n", kseq->ksq_loads[PRI_REALTIME]);
263 	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
264 	printf("\tload IDLE:      %d\n", kseq->ksq_loads[PRI_IDLE]);
265 	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
266 	printf("\tnice counts:\n");
267 	for (i = 0; i < PRIO_TOTAL + 1; i++)
268 		if (kseq->ksq_nice[i])
269 			printf("\t\t%d = %d\n",
270 			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
271 }
272 
273 static void
274 kseq_add(struct kseq *kseq, struct kse *ke)
275 {
276 	mtx_assert(&sched_lock, MA_OWNED);
277 	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
278 	kseq->ksq_load++;
279 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
280 		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
281 		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
282 		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
283 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
284 		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
285 #ifdef SMP
286 	kseq->ksq_rslices += ke->ke_slice;
287 #endif
288 }
289 
290 static void
291 kseq_rem(struct kseq *kseq, struct kse *ke)
292 {
293 	mtx_assert(&sched_lock, MA_OWNED);
294 	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
295 	kseq->ksq_load--;
296 	ke->ke_runq = NULL;
297 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
298 		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
299 #ifdef SMP
300 	kseq->ksq_rslices -= ke->ke_slice;
301 #endif
302 }
303 
304 static void
305 kseq_nice_add(struct kseq *kseq, int nice)
306 {
307 	mtx_assert(&sched_lock, MA_OWNED);
308 	/* Normalize to zero. */
309 	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
310 	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
311 		kseq->ksq_nicemin = nice;
312 }
313 
314 static void
315 kseq_nice_rem(struct kseq *kseq, int nice)
316 {
317 	int n;
318 
319 	mtx_assert(&sched_lock, MA_OWNED);
320 	/* Normalize to zero. */
321 	n = nice + SCHED_PRI_NHALF;
322 	kseq->ksq_nice[n]--;
323 	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
324 
325 	/*
326 	 * If this wasn't the smallest nice value or there are more in
327 	 * this bucket we can just return.  Otherwise we have to recalculate
328 	 * the smallest nice.
329 	 */
330 	if (nice != kseq->ksq_nicemin ||
331 	    kseq->ksq_nice[n] != 0 ||
332 	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
333 		return;
334 
335 	for (; n < SCHED_PRI_NRESV + 1; n++)
336 		if (kseq->ksq_nice[n]) {
337 			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
338 			return;
339 		}
340 }
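
/*
 * Example of the nice bookkeeping above, assuming SCHED_PRI_NHALF == 20:
 * a kse with nice -20 is counted in ksq_nice[0], nice 0 in ksq_nice[20]
 * and nice +20 in ksq_nice[40].  ksq_nicemin tracks the lowest occupied
 * bin so that sched_slice() can give the least nice ksegrp on the queue
 * the largest slice.
 */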
341 
342 #ifdef SMP
343 /*
344  * kseq_balance is a simple CPU load balancing algorithm.  It operates by
345  * finding the least loaded and most loaded cpu and equalizing their load
346  * by migrating some processes.
347  *
348  * Dealing only with two CPUs at a time has two advantages.  Firstly, most
349  * installations will only have 2 cpus.  Secondly, load balancing too much at
350  * once can have an unpleasant effect on the system.  The scheduler rarely has
351  * enough information to make perfect decisions.  So this algorithm chooses
352  * simplicity and more gradual effects on load in larger systems.
353  *
354  * It could be improved by considering the priorities and slices assigned to
355  * each task prior to balancing them.  There are many pathological cases with
356  * any approach and so the semi-random algorithm below may work as well as any.
357  *
358  */
359 void
360 kseq_balance(void *arg)
361 {
362 	struct kseq *kseq;
363 	int high_load;
364 	int low_load;
365 	int high_cpu;
366 	int low_cpu;
367 	int move;
368 	int diff;
369 	int i;
370 
371 	high_cpu = 0;
372 	low_cpu = 0;
373 	high_load = 0;
374 	low_load = -1;
375 
376 	mtx_lock_spin(&sched_lock);
377 	for (i = 0; i < mp_maxid; i++) {
378 		if (CPU_ABSENT(i))
379 			continue;
380 		kseq = KSEQ_CPU(i);
381 		if (kseq->ksq_load > high_load) {
382 			high_load = kseq->ksq_load;
383 			high_cpu = i;
384 		}
385 		if (low_load == -1 || kseq->ksq_load < low_load) {
386 			low_load = kseq->ksq_load;
387 			low_cpu = i;
388 		}
389 	}
390 
391 	/*
392 	 * Nothing to do.
393 	 */
394 	if (high_load < 2 || low_load == high_load)
395 		goto out;
396 
397 	diff = high_load - low_load;
398 	move = diff / 2;
399 	if (diff & 0x1)
400 		move++;
401 
402 	for (i = 0; i < move; i++)
403 		kseq_move(KSEQ_CPU(high_cpu), low_cpu);
404 
405 out:
406 	mtx_unlock_spin(&sched_lock);
407 	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);
408 
409 	return;
410 }
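
/*
 * Example: with a high load of 5 and a low load of 2, diff = 3 and
 * move = 2 (the odd remainder rounds up), so two kses are pushed from the
 * busiest cpu toward the least busy one.  The callout then re-arms itself
 * to run the balancer again one second (hz ticks) later.
 */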
411 
412 struct kseq *
413 kseq_load_highest(void)
414 {
415 	struct kseq *kseq;
416 	int load;
417 	int cpu;
418 	int i;
419 
420 	mtx_assert(&sched_lock, MA_OWNED);
421 	cpu = 0;
422 	load = 0;
423 
424 	for (i = 0; i < mp_maxid; i++) {
425 		if (CPU_ABSENT(i))
426 			continue;
427 		kseq = KSEQ_CPU(i);
428 		if (kseq->ksq_load > load) {
429 			load = kseq->ksq_load;
430 			cpu = i;
431 		}
432 	}
433 	if (load > 1)
434 		return (KSEQ_CPU(cpu));
435 
436 	return (NULL);
437 }
438 
439 void
440 kseq_move(struct kseq *from, int cpu)
441 {
442 	struct kse *ke;
443 
444 	ke = kseq_choose(from);
445 	runq_remove(ke->ke_runq, ke);
446 	ke->ke_state = KES_THREAD;
447 	kseq_rem(from, ke);
448 
449 	ke->ke_cpu = cpu;
450 	sched_add(ke);
451 }
452 #endif
453 
454 struct kse *
455 kseq_choose(struct kseq *kseq)
456 {
457 	struct kse *ke;
458 	struct runq *swap;
459 
460 	mtx_assert(&sched_lock, MA_OWNED);
461 	swap = NULL;
462 
463 	for (;;) {
464 		ke = runq_choose(kseq->ksq_curr);
465 		if (ke == NULL) {
466 			/*
467 			 * We already swapped once and didn't get anywhere.
468 			 */
469 			if (swap)
470 				break;
471 			swap = kseq->ksq_curr;
472 			kseq->ksq_curr = kseq->ksq_next;
473 			kseq->ksq_next = swap;
474 			continue;
475 		}
476 		/*
477 		 * If we encounter a slice of 0, the kse is in a
478 		 * TIMESHARE kse group and its nice was too far out
479 		 * of the range that receives slices.
480 		 */
481 		if (ke->ke_slice == 0) {
482 			runq_remove(ke->ke_runq, ke);
483 			sched_slice(ke);
484 			ke->ke_runq = kseq->ksq_next;
485 			runq_add(ke->ke_runq, ke);
486 			continue;
487 		}
488 		return (ke);
489 	}
490 
491 	return (runq_choose(&kseq->ksq_idle));
492 }
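
/*
 * Illustration of the two timeshare queues handled above: kses that use
 * up their slice and are not considered interactive are requeued on
 * ksq_next by sched_clock().  Once ksq_curr runs dry the two pointers are
 * swapped, so everything that expired in the previous round becomes
 * runnable again.  A kse that was handed a slice of 0 is re-sliced and
 * pushed to ksq_next, to be tried again after the following swap.
 */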
493 
494 static void
495 kseq_setup(struct kseq *kseq)
496 {
497 	runq_init(&kseq->ksq_timeshare[0]);
498 	runq_init(&kseq->ksq_timeshare[1]);
499 	runq_init(&kseq->ksq_idle);
500 
501 	kseq->ksq_curr = &kseq->ksq_timeshare[0];
502 	kseq->ksq_next = &kseq->ksq_timeshare[1];
503 
504 	kseq->ksq_loads[PRI_ITHD] = 0;
505 	kseq->ksq_loads[PRI_REALTIME] = 0;
506 	kseq->ksq_loads[PRI_TIMESHARE] = 0;
507 	kseq->ksq_loads[PRI_IDLE] = 0;
508 	kseq->ksq_load = 0;
509 #ifdef SMP
510 	kseq->ksq_rslices = 0;
511 #endif
512 }
513 
514 static void
515 sched_setup(void *dummy)
516 {
517 	int i;
518 
519 	slice_min = (hz/100);
520 	slice_max = (hz/10);
521 
522 	mtx_lock_spin(&sched_lock);
523 	/* init kseqs */
524 	for (i = 0; i < MAXCPU; i++)
525 		kseq_setup(KSEQ_CPU(i));
526 
527 	kseq_add(KSEQ_SELF(), &kse0);
528 	mtx_unlock_spin(&sched_lock);
529 #ifdef SMP
530 	callout_init(&kseq_lb_callout, 1);
531 	kseq_balance(NULL);
532 #endif
533 }
534 
535 /*
536  * Scale the scheduling priority according to the "interactivity" of this
537  * process.
538  */
539 static void
540 sched_priority(struct ksegrp *kg)
541 {
542 	int pri;
543 
544 	if (kg->kg_pri_class != PRI_TIMESHARE)
545 		return;
546 
547 	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
548 	pri += SCHED_PRI_BASE;
549 	pri += kg->kg_nice;
550 
551 	if (pri > PRI_MAX_TIMESHARE)
552 		pri = PRI_MAX_TIMESHARE;
553 	else if (pri < PRI_MIN_TIMESHARE)
554 		pri = PRI_MIN_TIMESHARE;
555 
556 	kg->kg_user_pri = pri;
557 
558 	return;
559 }
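
/*
 * Illustrative example, reusing the assumptions from the SCHED_PRI_*
 * macros above (SCHED_DYN_RANGE = 24, SCHED_PRI_BASE = PRI_MIN_TIMESHARE
 * + 20): an interactive ksegrp with a score of 5 and nice 0 ends up at
 * PRI_MIN_TIMESHARE + 21, while a cpu hog with a score of 95 and nice +20
 * computes to PRI_MIN_TIMESHARE + 62, just shy of PRI_MAX_TIMESHARE.
 */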
560 
561 /*
562  * Calculate a time slice based on the properties of the kseg and the runq
563  * that we're on.  This is only for PRI_TIMESHARE ksegrps.
564  */
565 static void
566 sched_slice(struct kse *ke)
567 {
568 	struct kseq *kseq;
569 	struct ksegrp *kg;
570 
571 	kg = ke->ke_ksegrp;
572 	kseq = KSEQ_CPU(ke->ke_cpu);
573 
574 	/*
575 	 * Rationale:
576 	 * KSEs in interactive ksegs get the minimum slice so that we
577 	 * quickly notice if they abuse their advantage.
578 	 *
579 	 * KSEs in non-interactive ksegs are assigned a slice that is
580 	 * based on the kseg's nice value relative to the least nice kseg
581 	 * on the run queue for this cpu.
582 	 *
583 	 * If the KSE is less nice than all others it gets the maximum
584 	 * slice and other KSEs will adjust their slice relative to
585 	 * this when they first expire.
586 	 *
587 	 * There is a 20 point window that starts relative to the least
588 	 * nice kse on the run queue.  Slice size is determined by
589 	 * the kse's distance from the least nice ksegrp.
590 	 *
591 	 * If you are outside of the window you will get no slice and
592 	 * you will be reevaluated each time you are selected on the
593 	 * run queue.
594 	 *
595 	 */
596 
597 	if (!SCHED_INTERACTIVE(kg)) {
598 		int nice;
599 
600 		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
601 		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
602 		    kg->kg_nice < kseq->ksq_nicemin)
603 			ke->ke_slice = SCHED_SLICE_MAX;
604 		else if (nice <= SCHED_PRI_NTHRESH)
605 			ke->ke_slice = SCHED_SLICE_NICE(nice);
606 		else
607 			ke->ke_slice = 0;
608 	} else
609 		ke->ke_slice = SCHED_SLICE_MIN;
610 
611 	CTR6(KTR_ULE,
612 	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
613 	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
614 	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));
615 
616 	/*
617 	 * Check to see if we need to scale back the slp and run time
618 	 * in the kg.  This will cause us to forget old interactivity
619 	 * while maintaining the current ratio.
620 	 */
621 	if ((kg->kg_runtime + kg->kg_slptime) >  SCHED_SLP_RUN_MAX) {
622 		kg->kg_runtime /= SCHED_SLP_RUN_THROTTLE;
623 		kg->kg_slptime /= SCHED_SLP_RUN_THROTTLE;
624 	}
625 	CTR4(KTR_ULE, "Slp vs Run(2) %p (Slp %d, Run %d, Score %d)",
626 	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
627 	    sched_interact_score(kg));
628 
629 	return;
630 }
631 
632 static int
633 sched_interact_score(struct ksegrp *kg)
634 {
635 	int div;
636 
637 	if (kg->kg_runtime > kg->kg_slptime) {
638 		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
639 		return (SCHED_INTERACT_HALF +
640 		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
641 	} else if (kg->kg_slptime > kg->kg_runtime) {
642 		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
643 		return (kg->kg_runtime / div);
644 	}
645 
646 	/*
647 	 * This can happen if slptime and runtime are 0.
648 	 */
649 	return (0);
650 
651 }
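
/*
 * Worked example of the scoring above (the kernel stores these values as
 * ticks << 10, but only the ratio matters): a ksegrp with kg_slptime 9000
 * and kg_runtime 1000 gets div = max(1, 9000 / 50) = 180 and a score of
 * 1000 / 180 = 5, well under SCHED_INTERACT_THRESH, so it is treated as
 * interactive.  With the numbers reversed the score is
 * 50 + (50 - 1000 / 180) = 95, marking it as a cpu hog.
 */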
652 
653 /*
654  * This is only somewhat accurate since, given many processes of the same
655  * priority, they will switch when their slices run out, which will be
656  * at most SCHED_SLICE_MAX.
657  */
658 int
659 sched_rr_interval(void)
660 {
661 	return (SCHED_SLICE_MAX);
662 }
663 
664 void
665 sched_pctcpu_update(struct kse *ke)
666 {
667 	/*
668 	 * Adjust counters and watermark for pctcpu calc.
669 	 */
670 
671 	/*
672 	 * Shift the tick count out so that the divide doesn't round away
673 	 * our results.
674 	 */
675 	ke->ke_ticks <<= 10;
676 	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
677 		    SCHED_CPU_TICKS;
678 	ke->ke_ticks >>= 10;
679 	ke->ke_ltick = ticks;
680 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
681 }
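
/*
 * Example of the rescaling above: the tick count is multiplied by
 * SCHED_CPU_TICKS / (ke_ltick - ke_ftick), so 400 ticks accumulated over
 * a window twice as long as SCHED_CPU_TICKS become roughly 200, and the
 * window is re-anchored so that ke_ftick sits exactly SCHED_CPU_TICKS
 * behind the current time.  The <<= 10 / >>= 10 pair keeps the
 * intermediate divide from rounding the count away.
 */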
682 
683 #ifdef SMP
684 /* XXX Should be changed to kseq_load_lowest() */
685 int
686 sched_pickcpu(void)
687 {
688 	struct kseq *kseq;
689 	int load;
690 	int cpu;
691 	int i;
692 
693 	mtx_assert(&sched_lock, MA_OWNED);
694 	if (!smp_started)
695 		return (0);
696 
697 	load = -1;
698 	cpu = 0;
699 
700 	for (i = 0; i < mp_maxid; i++) {
701 		if (CPU_ABSENT(i))
702 			continue;
703 		kseq = KSEQ_CPU(i);
704 		if (load == -1 || kseq->ksq_load < load) {
705 			cpu = i;
706 			load = kseq->ksq_load;
707 		}
708 	}
709 
710 	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
711 	return (cpu);
712 }
713 #else
714 int
715 sched_pickcpu(void)
716 {
717 	return (0);
718 }
719 #endif
720 
721 void
722 sched_prio(struct thread *td, u_char prio)
723 {
724 	struct kse *ke;
725 	struct runq *rq;
726 
727 	mtx_assert(&sched_lock, MA_OWNED);
728 	ke = td->td_kse;
729 	td->td_priority = prio;
730 
731 	if (TD_ON_RUNQ(td)) {
732 		rq = ke->ke_runq;
733 
734 		runq_remove(rq, ke);
735 		runq_add(rq, ke);
736 	}
737 }
738 
739 void
740 sched_switchout(struct thread *td)
741 {
742 	struct kse *ke;
743 
744 	mtx_assert(&sched_lock, MA_OWNED);
745 
746 	ke = td->td_kse;
747 
748 	td->td_last_kse = ke;
749 	td->td_lastcpu = td->td_oncpu;
750 	td->td_oncpu = NOCPU;
751 	td->td_flags &= ~TDF_NEEDRESCHED;
752 
753 	if (TD_IS_RUNNING(td)) {
754 		/*
755 		 * This queue is always correct except for idle threads which
756 		 * have a higher priority due to priority propagation.
757 		 */
758 		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
759 		    ke->ke_thread->td_priority > PRI_MIN_IDLE)
760 			ke->ke_runq = KSEQ_SELF()->ksq_curr;
761 		runq_add(ke->ke_runq, ke);
762 		/* setrunqueue(td); */
763 		return;
764 	}
765 	if (ke->ke_runq)
766 		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
767 	/*
768 	 * We will not be on the run queue. So we must be
769 	 * sleeping or similar.
770 	 */
771 	if (td->td_proc->p_flag & P_SA)
772 		kse_reassign(ke);
773 }
774 
775 void
776 sched_switchin(struct thread *td)
777 {
778 	/* struct kse *ke = td->td_kse; */
779 	mtx_assert(&sched_lock, MA_OWNED);
780 
781 	td->td_oncpu = PCPU_GET(cpuid);
782 }
783 
784 void
785 sched_nice(struct ksegrp *kg, int nice)
786 {
787 	struct kse *ke;
788 	struct thread *td;
789 	struct kseq *kseq;
790 
791 	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
792 	mtx_assert(&sched_lock, MA_OWNED);
793 	/*
794 	 * We need to adjust the nice counts for running KSEs.
795 	 */
796 	if (kg->kg_pri_class == PRI_TIMESHARE)
797 		FOREACH_KSE_IN_GROUP(kg, ke) {
798 			if (ke->ke_state != KES_ONRUNQ &&
799 			    ke->ke_state != KES_THREAD)
800 				continue;
801 			kseq = KSEQ_CPU(ke->ke_cpu);
802 			kseq_nice_rem(kseq, kg->kg_nice);
803 			kseq_nice_add(kseq, nice);
804 		}
805 	kg->kg_nice = nice;
806 	sched_priority(kg);
807 	FOREACH_THREAD_IN_GROUP(kg, td)
808 		td->td_flags |= TDF_NEEDRESCHED;
809 }
810 
811 void
812 sched_sleep(struct thread *td, u_char prio)
813 {
814 	mtx_assert(&sched_lock, MA_OWNED);
815 
816 	td->td_slptime = ticks;
817 	td->td_priority = prio;
818 
819 	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
820 	    td->td_kse, td->td_slptime);
821 }
822 
823 void
824 sched_wakeup(struct thread *td)
825 {
826 	mtx_assert(&sched_lock, MA_OWNED);
827 
828 	/*
829 	 * Let the kseg know how long we slept for.  This is because process
830 	 * interactivity behavior is modeled in the kseg.
831 	 */
832 	if (td->td_slptime) {
833 		struct ksegrp *kg;
834 		int hzticks;
835 
836 		kg = td->td_ksegrp;
837 		hzticks = ticks - td->td_slptime;
838 		kg->kg_slptime += hzticks << 10;
839 		sched_priority(kg);
840 		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
841 		    td->td_kse, hzticks);
842 		td->td_slptime = 0;
843 	}
844 	setrunqueue(td);
845 	if (td->td_priority < curthread->td_priority)
846 		curthread->td_flags |= TDF_NEEDRESCHED;
847 }
848 
849 /*
850  * Penalize the parent for creating a new child and initialize the child's
851  * priority.
852  */
853 void
854 sched_fork(struct proc *p, struct proc *p1)
855 {
856 
857 	mtx_assert(&sched_lock, MA_OWNED);
858 
859 	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
860 	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
861 	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
862 }
863 
864 void
865 sched_fork_kse(struct kse *ke, struct kse *child)
866 {
867 
868 	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
869 	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
870 	child->ke_runq = NULL;
871 
872 	/*
873 	 * Claim that we've been running for one second for statistical
874 	 * purposes.
875 	 */
876 	child->ke_ticks = 0;
877 	child->ke_ltick = ticks;
878 	child->ke_ftick = ticks - hz;
879 }
880 
881 void
882 sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
883 {
884 
885 	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
886 	/* XXX Need something better here */
887 
888 #if 1
889 	child->kg_slptime = kg->kg_slptime;
890 	child->kg_runtime = kg->kg_runtime;
891 #else
892 	if (kg->kg_slptime > kg->kg_runtime) {
893 		child->kg_slptime = SCHED_DYN_RANGE;
894 		child->kg_runtime = kg->kg_slptime / SCHED_DYN_RANGE;
895 	} else {
896 		child->kg_runtime = SCHED_DYN_RANGE;
897 		child->kg_slptime = kg->kg_runtime / SCHED_DYN_RANGE;
898 	}
899 #endif
900 
901 	child->kg_user_pri = kg->kg_user_pri;
902 	child->kg_nice = kg->kg_nice;
903 }
904 
905 void
906 sched_fork_thread(struct thread *td, struct thread *child)
907 {
908 }
909 
910 void
911 sched_class(struct ksegrp *kg, int class)
912 {
913 	struct kseq *kseq;
914 	struct kse *ke;
915 
916 	mtx_assert(&sched_lock, MA_OWNED);
917 	if (kg->kg_pri_class == class)
918 		return;
919 
920 	FOREACH_KSE_IN_GROUP(kg, ke) {
921 		if (ke->ke_state != KES_ONRUNQ &&
922 		    ke->ke_state != KES_THREAD)
923 			continue;
924 		kseq = KSEQ_CPU(ke->ke_cpu);
925 
926 		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
927 		kseq->ksq_loads[PRI_BASE(class)]++;
928 
929 		if (kg->kg_pri_class == PRI_TIMESHARE)
930 			kseq_nice_rem(kseq, kg->kg_nice);
931 		else if (class == PRI_TIMESHARE)
932 			kseq_nice_add(kseq, kg->kg_nice);
933 	}
934 
935 	kg->kg_pri_class = class;
936 }
937 
938 /*
939  * Return some of the child's priority and interactivity to the parent.
940  */
941 void
942 sched_exit(struct proc *p, struct proc *child)
943 {
944 	/* XXX Need something better here */
945 	mtx_assert(&sched_lock, MA_OWNED);
946 	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
947 	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
948 }
949 
950 void
951 sched_exit_kse(struct kse *ke, struct kse *child)
952 {
953 	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
954 }
955 
956 void
957 sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
958 {
959 	kg->kg_slptime += child->kg_slptime;
960 	kg->kg_runtime += child->kg_runtime;
961 	if ((kg->kg_runtime + kg->kg_slptime) >  SCHED_SLP_RUN_MAX) {
962 		kg->kg_runtime /= SCHED_SLP_RUN_THROTTLE;
963 		kg->kg_slptime /= SCHED_SLP_RUN_THROTTLE;
964 	}
965 }
966 
967 void
968 sched_exit_thread(struct thread *td, struct thread *child)
969 {
970 }
971 
972 void
973 sched_clock(struct kse *ke)
974 {
975 	struct kseq *kseq;
976 	struct ksegrp *kg;
977 	struct thread *td;
978 #if 0
979 	struct kse *nke;
980 #endif
981 
982 	/*
983 	 * sched_setup() apparently happens prior to stathz being set.  We
984 	 * need to resolve the timers earlier in the boot so we can avoid
985 	 * calculating this here.
986 	 */
987 	if (realstathz == 0) {
988 		realstathz = stathz ? stathz : hz;
989 		tickincr = hz / realstathz;
990 		/*
991 		 * XXX This does not work for values of stathz that are much
992 		 * larger than hz.
993 		 */
994 		if (tickincr == 0)
995 			tickincr = 1;
996 	}
997 
998 	td = ke->ke_thread;
999 	kg = ke->ke_ksegrp;
1000 
1001 	mtx_assert(&sched_lock, MA_OWNED);
1002 	KASSERT((td != NULL), ("schedclock: null thread pointer"));
1003 
1004 	/* Adjust ticks for pctcpu */
1005 	ke->ke_ticks++;
1006 	ke->ke_ltick = ticks;
1007 
1008 	/* Go up to one second beyond our max and then trim back down */
1009 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1010 		sched_pctcpu_update(ke);
1011 
1012 	if (td->td_flags & TDF_IDLETD)
1013 		return;
1014 
1015 	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1016 	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1017 
1018 	/*
1019 	 * We only run the slicing code for TIMESHARE ksegrps.
1020 	 */
1021 	if (kg->kg_pri_class != PRI_TIMESHARE)
1022 		return;
1023 	/*
1024 	 * Check for a higher priority task on the run queue.  This can happen
1025 	 * on SMP if another processor woke up a process on our runq.
1026 	 */
1027 	kseq = KSEQ_SELF();
1028 #if 0
1029 	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
1030 		if (sched_strict &&
1031 		    nke->ke_thread->td_priority < td->td_priority)
1032 			td->td_flags |= TDF_NEEDRESCHED;
1033 		else if (nke->ke_thread->td_priority <
1034 		    td->td_priority SCHED_PRIO_SLOP)
1035 
1036 		if (nke->ke_thread->td_priority < td->td_priority)
1037 			td->td_flags |= TDF_NEEDRESCHED;
1038 	}
1039 #endif
1040 	/*
1041 	 * We used a tick; charge it to the ksegrp so that we can compute our
1042 	 * interactivity.
1043 	 */
1044 	kg->kg_runtime += tickincr << 10;
1045 
1046 	/*
1047 	 * We used up one time slice.
1048 	 */
1049 	ke->ke_slice--;
1050 #ifdef SMP
1051 	kseq->ksq_rslices--;
1052 #endif
1053 
1054 	if (ke->ke_slice > 0)
1055 		return;
1056 	/*
1057 	 * We're out of time, recompute priorities and requeue.
1058 	 */
1059 	kseq_rem(kseq, ke);
1060 	sched_priority(kg);
1061 	sched_slice(ke);
1062 	if (SCHED_CURR(kg, ke))
1063 		ke->ke_runq = kseq->ksq_curr;
1064 	else
1065 		ke->ke_runq = kseq->ksq_next;
1066 	kseq_add(kseq, ke);
1067 	td->td_flags |= TDF_NEEDRESCHED;
1068 }
1069 
1070 int
1071 sched_runnable(void)
1072 {
1073 	struct kseq *kseq;
1074 	int load;
1075 
1076 	load = 1;
1077 
1078 	mtx_lock_spin(&sched_lock);
1079 	kseq = KSEQ_SELF();
1080 
1081 	if (kseq->ksq_load)
1082 		goto out;
1083 #ifdef SMP
1084 	/*
1085 	 * For SMP we may steal another processor's KSEs.  Just search until we
1086 	 * verify that at least one other cpu has a runnable task.
1087 	 */
1088 	if (smp_started) {
1089 		int i;
1090 
1091 		for (i = 0; i < mp_maxid; i++) {
1092 			if (CPU_ABSENT(i))
1093 				continue;
1094 			kseq = KSEQ_CPU(i);
1095 			if (kseq->ksq_load > 1)
1096 				goto out;
1097 		}
1098 	}
1099 #endif
1100 	load = 0;
1101 out:
1102 	mtx_unlock_spin(&sched_lock);
1103 	return (load);
1104 }
1105 
1106 void
1107 sched_userret(struct thread *td)
1108 {
1109 	struct ksegrp *kg;
1110 	struct kseq *kseq;
1111 	struct kse *ke;
1112 
1113 	kg = td->td_ksegrp;
1114 
1115 	if (td->td_priority != kg->kg_user_pri) {
1116 		mtx_lock_spin(&sched_lock);
1117 		td->td_priority = kg->kg_user_pri;
1118 		kseq = KSEQ_SELF();
1119 		if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
1120 		    kseq->ksq_load > 1 &&
1121 		    (ke = kseq_choose(kseq)) != NULL &&
1122 		    ke->ke_thread->td_priority < td->td_priority)
1123 			curthread->td_flags |= TDF_NEEDRESCHED;
1124 		mtx_unlock_spin(&sched_lock);
1125 	}
1126 }
1127 
1128 struct kse *
1129 sched_choose(void)
1130 {
1131 	struct kseq *kseq;
1132 	struct kse *ke;
1133 
1134 	mtx_assert(&sched_lock, MA_OWNED);
1135 #ifdef SMP
1136 retry:
1137 #endif
1138 	kseq = KSEQ_SELF();
1139 	ke = kseq_choose(kseq);
1140 	if (ke) {
1141 		runq_remove(ke->ke_runq, ke);
1142 		ke->ke_state = KES_THREAD;
1143 
1144 		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1145 			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1146 			    ke, ke->ke_runq, ke->ke_slice,
1147 			    ke->ke_thread->td_priority);
1148 		}
1149 		return (ke);
1150 	}
1151 
1152 #ifdef SMP
1153 	if (smp_started) {
1154 		/*
1155 		 * Find the cpu with the highest load and steal one proc.
1156 		 */
1157 		if ((kseq = kseq_load_highest()) == NULL)
1158 			return (NULL);
1159 
1160 		/*
1161 		 * Remove this kse from this kseq and runq and then requeue
1162 		 * on the current processor.  Then we will dequeue it
1163 		 * normally above.
1164 		 */
1165 		kseq_move(kseq, PCPU_GET(cpuid));
1166 		goto retry;
1167 	}
1168 #endif
1169 
1170 	return (NULL);
1171 }
1172 
1173 void
1174 sched_add(struct kse *ke)
1175 {
1176 	struct kseq *kseq;
1177 	struct ksegrp *kg;
1178 
1179 	mtx_assert(&sched_lock, MA_OWNED);
1180 	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
1181 	KASSERT((ke->ke_thread->td_kse != NULL),
1182 	    ("sched_add: No KSE on thread"));
1183 	KASSERT(ke->ke_state != KES_ONRUNQ,
1184 	    ("sched_add: kse %p (%s) already in run queue", ke,
1185 	    ke->ke_proc->p_comm));
1186 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1187 	    ("sched_add: process swapped out"));
1188 	KASSERT(ke->ke_runq == NULL,
1189 	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1190 
1191 	kg = ke->ke_ksegrp;
1192 
1193 	switch (PRI_BASE(kg->kg_pri_class)) {
1194 	case PRI_ITHD:
1195 	case PRI_REALTIME:
1196 		kseq = KSEQ_SELF();
1197 		ke->ke_runq = kseq->ksq_curr;
1198 		ke->ke_slice = SCHED_SLICE_MAX;
1199 		ke->ke_cpu = PCPU_GET(cpuid);
1200 		break;
1201 	case PRI_TIMESHARE:
1202 		kseq = KSEQ_CPU(ke->ke_cpu);
1203 		if (SCHED_CURR(kg, ke))
1204 			ke->ke_runq = kseq->ksq_curr;
1205 		else
1206 			ke->ke_runq = kseq->ksq_next;
1207 		break;
1208 	case PRI_IDLE:
1209 		kseq = KSEQ_CPU(ke->ke_cpu);
1210 		/*
1211 		 * This is for priority propagation.
1212 		 */
1213 		if (ke->ke_thread->td_priority > PRI_MIN_IDLE)
1214 			ke->ke_runq = kseq->ksq_curr;
1215 		else
1216 			ke->ke_runq = &kseq->ksq_idle;
1217 		ke->ke_slice = SCHED_SLICE_MIN;
1218 		break;
1219 	default:
1220 		panic("Unknown pri class.\n");
1221 		break;
1222 	}
1223 
1224 	ke->ke_ksegrp->kg_runq_kses++;
1225 	ke->ke_state = KES_ONRUNQ;
1226 
1227 	runq_add(ke->ke_runq, ke);
1228 	kseq_add(kseq, ke);
1229 }
1230 
1231 void
1232 sched_rem(struct kse *ke)
1233 {
1234 	struct kseq *kseq;
1235 
1236 	mtx_assert(&sched_lock, MA_OWNED);
1237 	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
1238 
1239 	ke->ke_state = KES_THREAD;
1240 	ke->ke_ksegrp->kg_runq_kses--;
1241 	kseq = KSEQ_CPU(ke->ke_cpu);
1242 	runq_remove(ke->ke_runq, ke);
1243 	kseq_rem(kseq, ke);
1244 }
1245 
1246 fixpt_t
1247 sched_pctcpu(struct kse *ke)
1248 {
1249 	fixpt_t pctcpu;
1250 
1251 	pctcpu = 0;
1252 
1253 	mtx_lock_spin(&sched_lock);
1254 	if (ke->ke_ticks) {
1255 		int rtick;
1256 
1257 		/* Update to account for time potentially spent sleeping */
1258 		ke->ke_ltick = ticks;
1259 		/*
1260 		 * Don't update more frequently than twice a second.  Allowing
1261 		 * this causes the cpu usage to decay away too quickly due to
1262 		 * rounding errors.
1263 		 */
1264 		if (ke->ke_ltick < (ticks - (hz / 2)))
1265 			sched_pctcpu_update(ke);
1266 
1267 		/* How many rticks per second? */
1268 		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1269 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1270 	}
1271 
1272 	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1273 	mtx_unlock_spin(&sched_lock);
1274 
1275 	return (pctcpu);
1276 }
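
/*
 * Example of the %cpu arithmetic above: a kse that was runnable for the
 * whole averaging window accumulates roughly realstathz * SCHED_CPU_TIME
 * ticks, so rtick = ke_ticks / SCHED_CPU_TIME is about realstathz and
 * pctcpu = (FSCALE * ((FSCALE * rtick) / realstathz)) >> FSHIFT works out
 * to FSCALE, i.e. 100%.  A kse that ran half the time ends up near
 * FSCALE / 2.
 */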
1277 
1278 int
1279 sched_sizeof_kse(void)
1280 {
1281 	return (sizeof(struct kse) + sizeof(struct ke_sched));
1282 }
1283 
1284 int
1285 sched_sizeof_ksegrp(void)
1286 {
1287 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1288 }
1289 
1290 int
1291 sched_sizeof_proc(void)
1292 {
1293 	return (sizeof(struct proc));
1294 }
1295 
1296 int
1297 sched_sizeof_thread(void)
1298 {
1299 	return (sizeof(struct thread) + sizeof(struct td_sched));
1300 }
1301