/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks

struct kg_sched {
	int	skg_slptime;
};
#define	kg_slptime	kg_sched->skg_slptime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct ke_sched ke_sched;
struct kg_sched kg_sched;
struct td_sched td_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * This priority range has 20 priorities on either end that are reachable
 * only through nice values.
 */
#define	SCHED_PRI_NRESV	40
#define	SCHED_PRI_RANGE	((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1) - \
    SCHED_PRI_NRESV)
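
/*
 * For example, with the stock 5.x timeshare range (PRI_MIN_TIMESHARE 160
 * through PRI_MAX_TIMESHARE 223, 64 priorities in all) SCHED_PRI_RANGE
 * works out to 64 - 40 = 24 dynamic priorities.
 */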

/*
 * These determine how sleep time affects the priority of a process.
 *
 * SLP_MAX:	Maximum amount of accrued sleep time.
 * SLP_SCALE:	Scale the number of ticks slept across the dynamic priority
 *		range.
 * SLP_TOPRI:	Convert a number of ticks slept into a priority value.
 * SLP_DECAY:	Reduce the sleep time to 50% for every granted slice.
 */
#define	SCHED_SLP_MAX	(hz * 2)
#define	SCHED_SLP_SCALE(slp)	(((slp) * SCHED_PRI_RANGE) / SCHED_SLP_MAX)
#define	SCHED_SLP_TOPRI(slp)	(SCHED_PRI_RANGE - SCHED_SLP_SCALE((slp)) + \
    SCHED_PRI_NRESV / 2)
#define	SCHED_SLP_DECAY(slp)	((slp) / 2)	/* XXX Multiple kses break */
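
/*
 * A worked example, assuming hz = 100 and SCHED_PRI_RANGE = 24:
 * SCHED_SLP_MAX is 200 ticks, so a kseg with no accrued sleep maps to
 * 24 - 0 + 20 = 44 while a fully slept kseg maps to 24 - 24 + 20 = 20;
 * sched_priority() then offsets the result by PRI_MIN_TIMESHARE and nice.
 */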

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per unit of pri or slp.
 * PRI_TOSLICE:	Compute a slice size that is proportional to the priority.
 * SLP_TOSLICE:	Compute a slice size that is inversely proportional to the
 *		amount of time slept. (smaller slices for interactive ksegs)
 * PRI_COMP:	This determines what fraction of the actual slice comes from
 *		the slice size computed from the priority.
 * SLP_COMP:	This determines what fraction of the actual slice comes from
 *		the slice size computed from the sleep time.
 */
#define	SCHED_SLICE_MIN		(hz / 100)
#define	SCHED_SLICE_MAX		(hz / 10)
#define	SCHED_SLICE_RANGE	(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_PRI_TOSLICE(pri)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((pri), SCHED_PRI_RANGE))
#define	SCHED_SLP_TOSLICE(slp)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((slp), SCHED_SLP_MAX))
#define	SCHED_SLP_COMP(slice)	(((slice) / 5) * 3)	/* 60% */
#define	SCHED_PRI_COMP(slice)	(((slice) / 5) * 2)	/* 40% */
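
/*
 * A worked example, assuming hz = 100 (SLICE_MIN = 1, SLICE_MAX = 10,
 * SLICE_RANGE = 10): a kseg that slept SCHED_SLP_MAX has sslice = 0 and,
 * at a user priority offset of 20, pslice = 2, so its slice computes to
 * 0 + 0 = 0 ticks and sched_slice() clamps it up to SLICE_MIN.  Thus
 * interactive ksegs receive the shortest slices while cpu-bound ksegs
 * earn longer ones.
 */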

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 */
#define	SCHED_CURR(kg)	((kg)->kg_slptime > (hz / 4) || \
    (kg)->kg_pri_class != PRI_TIMESHARE)
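
/*
 * E.g. with hz = 100, a timeshare kseg is considered interactive, and is
 * queued on curr, only while it retains more than 25 ticks of accrued
 * sleep; other scheduling classes always qualify.
 */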

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	60
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - pair of runqs per processor
 */

struct kseq {
	struct runq	ksq_runqs[2];	/* Run queue storage */
	struct runq	*ksq_curr;	/* Queue currently scheduled from */
	struct runq	*ksq_next;	/* Queue used once curr empties */
	int		ksq_load;	/* Total runnable */
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static int sched_slice(struct ksegrp *kg);
static int sched_priority(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

static struct kse *kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);

static void
kseq_setup(struct kseq *kseq)
{
	kseq->ksq_load = 0;
	kseq->ksq_curr = &kseq->ksq_runqs[0];
	kseq->ksq_next = &kseq->ksq_runqs[1];
	runq_init(kseq->ksq_curr);
	runq_init(kseq->ksq_next);
}

static void
sched_setup(void *dummy)
{
	int i;

	mtx_lock_spin(&sched_lock);
	/* init kseqs */
	for (i = 0; i < MAXCPU; i++)
		kseq_setup(KSEQ_CPU(i));
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static int
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return (kg->kg_user_pri);

	pri = SCHED_SLP_TOPRI(kg->kg_slptime);
	CTR2(KTR_RUNQ, "sched_priority: slptime: %d\tpri: %d",
	    kg->kg_slptime, pri);

	pri += PRI_MIN_TIMESHARE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return (kg->kg_user_pri);
}

/*
 * Calculate a time slice based on the process priority.
 */
static int
sched_slice(struct ksegrp *kg)
{
	int pslice;
	int sslice;
	int slice;
	int pri;

	pri = kg->kg_user_pri;
	pri -= PRI_MIN_TIMESHARE;
	pslice = SCHED_PRI_TOSLICE(pri);
	sslice = SCHED_SLP_TOSLICE(kg->kg_slptime);
	slice = SCHED_SLP_COMP(sslice) + SCHED_PRI_COMP(pslice);
	kg->kg_slptime = SCHED_SLP_DECAY(kg->kg_slptime);

	CTR4(KTR_RUNQ,
	    "sched_slice: pri: %d\tsslice: %d\tpslice: %d\tslice: %d",
	    pri, sslice, pslice, slice);

	if (slice < SCHED_SLICE_MIN)
		slice = SCHED_SLICE_MIN;
	else if (slice > SCHED_SLICE_MAX)
		slice = SCHED_SLICE_MAX;

	return (slice);
}

int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
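	 *
	 * The divisor below relies on ke_ltick != ke_ftick: sched_fork()
	 * seeds a window one hz wide and each update leaves it
	 * SCHED_CPU_TICKS wide, so it is assumed to stay non-zero.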
	 */
	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

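/*
 * Find the least loaded cpu for a newly runnable kse to be scheduled on.
 */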
#ifdef SMP
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	if (!smp_started)
		return (0);

	load = -1;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		/* Seed with the first present cpu, then keep the minimum. */
		if (load == -1 || kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	td->td_priority = prio;

	if (TD_ON_RUNQ(td)) {
		rq = ke->ke_runq;

		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = ke->ke_oncpu;
	ke->ke_flags &= ~KEF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		setrunqueue(td);
		return;
	}
	ke->ke_runq = NULL;

	/*
	 * We will not be on the run queue. So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_KSES)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_kse->ke_oncpu = PCPU_GET(cpuid); /* XXX */
	if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
	    td->td_priority != td->td_ksegrp->kg_user_pri)
		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct thread *td;

	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td) {
		td->td_kse->ke_flags |= KEF_NEEDRESCHED;
	}
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	/*
	 * If this is an interactive task clear its queue so it moves back
	 * on to curr when it wakes up.  Otherwise let it stay on the queue
	 * that it was assigned to.
	 */
	if (SCHED_CURR(td->td_kse->ke_ksegrp))
		td->td_kse->ke_runq = NULL;
#if 0
	if (td->td_priority < PZERO)
		kseq_cpu[td->td_kse->ke_oncpu].ksq_load++;
#endif
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	kg = td->td_ksegrp;

	if (td->td_slptime) {
		kg->kg_slptime += ticks - td->td_slptime;
		if (kg->kg_slptime > SCHED_SLP_MAX)
			kg->kg_slptime = SCHED_SLP_MAX;
		td->td_priority = sched_priority(kg);
	}
	td->td_slptime = 0;
#if 0
	if (td->td_priority < PZERO)
		kseq_cpu[td->td_kse->ke_oncpu].ksq_load--;
#endif
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct ksegrp *kg, struct ksegrp *child)
{
	struct kse *ckse;
	struct kse *pkse;

	mtx_assert(&sched_lock, MA_OWNED);
	ckse = FIRST_KSE_IN_KSEGRP(child);
	pkse = FIRST_KSE_IN_KSEGRP(kg);

	/* XXX Need something better here */
	child->kg_slptime = kg->kg_slptime;
	child->kg_user_pri = kg->kg_user_pri;

	if (pkse->ke_oncpu != PCPU_GET(cpuid)) {
		printf("pkse->ke_oncpu = %d\n", pkse->ke_oncpu);
		printf("cpuid = %d\n", PCPU_GET(cpuid));
		Debugger("stop");
	}

	ckse->ke_slice = pkse->ke_slice;
	ckse->ke_oncpu = pkse->ke_oncpu; /* sched_pickcpu(); */
	ckse->ke_runq = NULL;
	/*
	 * Claim that we've been running for one second for statistical
	 * purposes.
	 */
	ckse->ke_ticks = 0;
	ckse->ke_ltick = ticks;
	ckse->ke_ftick = ticks - hz;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct ksegrp *kg, struct ksegrp *child)
{
	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_slptime = child->kg_slptime;
	sched_priority(kg);
}

void
sched_clock(struct thread *td)
{
	struct kse *ke;
	struct kse *nke;
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("sched_clock: null thread pointer"));

	ke = td->td_kse;
	kg = td->td_ksegrp;

	/*
	 * Adjust ticks for pctcpu.  Each tick is scaled by 10000 to match
	 * the divisor used in sched_pctcpu().
	 */
	ke->ke_ticks += 10000;
	ke->ke_ltick = ticks;
	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (ke->ke_flags & KEF_IDLEKSE)
		return;

	/*
	 * Check for a higher priority task on the run queue.  This can happen
	 * on SMP if another processor woke up a process on our runq.
	 */
	kseq = KSEQ_SELF();
	nke = runq_choose(kseq->ksq_curr);

	if (nke && nke->ke_thread &&
	    nke->ke_thread->td_priority < td->td_priority)
		ke->ke_flags |= KEF_NEEDRESCHED;
	/*
	 * We used a tick, decrease our total sleep time.  This decreases our
	 * "interactivity".
	 */
	if (kg->kg_slptime)
		kg->kg_slptime--;
	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	if (ke->ke_slice == 0) {
		td->td_priority = sched_priority(kg);
		ke->ke_slice = sched_slice(kg);
		ke->ke_flags |= KEF_NEEDRESCHED;
		ke->ke_runq = NULL;
	}
}

int
sched_runnable(void)
{
	struct kseq *kseq;

	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		return (1);
#ifdef SMP
	/*
	 * For SMP we may steal another processor's KSEs.  Just search until
	 * we verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i))
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load)
				return (1);
		}
	}
#endif
	return (0);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

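/*
 * Select the next kse from the current queue.  When it is empty, swap the
 * current and next queues so kses parked on the next queue get their turn.
 */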
static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	if ((ke = runq_choose(kseq->ksq_curr)) == NULL) {
		swap = kseq->ksq_curr;
		kseq->ksq_curr = kseq->ksq_next;
		kseq->ksq_next = swap;
		ke = runq_choose(kseq->ksq_curr);
	}

	return (ke);
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq);

	if (ke) {
		runq_remove(ke->ke_runq, ke);
		kseq->ksq_load--;
		ke->ke_state = KES_THREAD;
	}

#ifdef SMP
	if (ke == NULL && smp_started) {
		int load;
		int cpu;
		int i;

		load = 0;
		cpu = 0;

		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i))
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load > load) {
				load = kseq->ksq_load;
				cpu = i;
			}
		}
		if (load) {
			kseq = KSEQ_CPU(cpu);
			ke = kseq_choose(kseq);
			kseq->ksq_load--;
			ke->ke_state = KES_THREAD;
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = NULL;
			ke->ke_oncpu = PCPU_GET(cpuid);
		}
	}
#endif
	return (ke);
}

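/*
 * Place a kse on one of this cpu's run queues, choosing between the current
 * and next queues with SCHED_CURR() when it has no queue assigned yet.
 */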
void
sched_add(struct kse *ke)
{

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));

	if (ke->ke_runq == NULL) {
		struct kseq *kseq;

		kseq = KSEQ_CPU(ke->ke_oncpu);
		if (SCHED_CURR(ke->ke_ksegrp))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
	}
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	KSEQ_CPU(ke->ke_oncpu)->ksq_load++;
}

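/*
 * Take a kse off of its run queue and undo the load accounting.
 */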
void
sched_rem(struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); */

	runq_remove(ke->ke_runq, ke);
	ke->ke_runq = NULL;
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	KSEQ_CPU(ke->ke_oncpu)->ksq_load--;
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	if (ke->ke_ticks) {
		int rtick;

		/* Update to account for time potentially spent sleeping */
		ke->ke_ltick = ticks;
		sched_pctcpu_update(ke);

		/* How many rticks per second? */
		rtick = ke->ke_ticks / (SCHED_CPU_TIME * 10000);
		pctcpu = (FSCALE * ((FSCALE * rtick) / stathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}