xref: /freebsd/sys/kern/sched_ule.c (revision cd6e33df1cde254aa3b3a4ec2d1d3a173a3457c4)
1 /*-
2  * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/ktr.h>
33 #include <sys/lock.h>
34 #include <sys/mutex.h>
35 #include <sys/proc.h>
36 #include <sys/sched.h>
37 #include <sys/smp.h>
38 #include <sys/sx.h>
39 #include <sys/sysctl.h>
40 #include <sys/sysproto.h>
41 #include <sys/vmmeter.h>
42 #ifdef DDB
43 #include <ddb/ddb.h>
44 #endif
45 #ifdef KTRACE
46 #include <sys/uio.h>
47 #include <sys/ktrace.h>
48 #endif
49 
50 #include <machine/cpu.h>
51 
52 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
53 /* XXX This is bogus compatibility crap for ps */
54 static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
55 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
56 
57 static void sched_setup(void *dummy);
58 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
59 
60 /*
61  * These datastructures are allocated within their parent datastructure but
62  * are scheduler specific.
63  */
64 
65 struct ke_sched {
66 	int		ske_slice;
67 	struct runq	*ske_runq;
68 	/* The following variables are only used for pctcpu calculation */
69 	int		ske_ltick;	/* Last tick that we were running on */
70 	int		ske_ftick;	/* First tick that we were running on */
71 	int		ske_ticks;	/* Tick count */
72 	u_char		ske_cpu;
73 };
74 #define	ke_slice	ke_sched->ske_slice
75 #define	ke_runq		ke_sched->ske_runq
76 #define	ke_ltick	ke_sched->ske_ltick
77 #define	ke_ftick	ke_sched->ske_ftick
78 #define	ke_ticks	ke_sched->ske_ticks
79 #define	ke_cpu		ke_sched->ske_cpu
80 
81 struct kg_sched {
82 	int	skg_slptime;
83 };
84 #define	kg_slptime	kg_sched->skg_slptime
85 
86 struct td_sched {
87 	int	std_slptime;
88 };
89 #define	td_slptime	td_sched->std_slptime
90 
91 struct ke_sched ke_sched;
92 struct kg_sched kg_sched;
93 struct td_sched td_sched;
94 
95 struct ke_sched *kse0_sched = &ke_sched;
96 struct kg_sched *ksegrp0_sched = &kg_sched;
97 struct p_sched *proc0_sched = NULL;
98 struct td_sched *thread0_sched = &td_sched;
99 
100 /*
101  * This priority range has 20 priorities on either end that are reachable
102  * only through nice values.
103  */
104 #define	SCHED_PRI_NRESV	40
105 #define	SCHED_PRI_RANGE	((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1) - \
106     SCHED_PRI_NRESV)
107 
108 /*
109  * These determine how sleep time affects the priority of a process.
110  *
111  * SLP_MAX:	Maximum amount of accrued sleep time.
112  * SLP_SCALE:	Scale the number of ticks slept across the dynamic priority
113  *		range.
114  * SLP_TOPRI:	Convert a number of ticks slept into a priority value.
115  * SLP_DECAY:	Reduce the sleep time to 50% for every granted slice.
116  */
117 #define	SCHED_SLP_MAX	(hz * 2)
118 #define	SCHED_SLP_SCALE(slp)	(((slp) * SCHED_PRI_RANGE) / SCHED_SLP_MAX)
119 #define	SCHED_SLP_TOPRI(slp)	(SCHED_PRI_RANGE - SCHED_SLP_SCALE((slp)) + \
120     SCHED_PRI_NRESV / 2)
121 #define	SCHED_SLP_DECAY(slp)	((slp) / 2)	/* XXX Multiple kses break */
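
/*
 * Illustrative example (hypothetical values): with hz = 1000 and a
 * SCHED_PRI_RANGE of 24, a kseg that has accrued slp = 1000 ticks of
 * sleep (half of SCHED_SLP_MAX) scales to SCHED_SLP_SCALE(1000) == 12,
 * so SCHED_SLP_TOPRI(1000) == 24 - 12 + 20 == 32.  The more sleep a
 * kseg accrues, the lower (better) the timeshare priority it is given.
 */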
122 
123 /*
124  * These parameters and macros determine the size of the time slice that is
125  * granted to each thread.
126  *
127  * SLICE_MIN:	Minimum time slice granted, in units of ticks.
128  * SLICE_MAX:	Maximum time slice granted.
129  * SLICE_RANGE:	Range of available time slices scaled by hz.
130  * SLICE_SCALE:	The number of slices granted per unit of pri or slp.
131  * PRI_TOSLICE:	Compute a slice size that is proportional to the priority.
132  * SLP_TOSLICE:	Compute a slice size that is inversely proportional to the
133  *		amount of time slept. (smaller slices for interactive ksegs)
134  * PRI_COMP:	This determines what fraction of the actual slice comes from
135  *		the slice size computed from the priority.
136  * SLP_COMP:	This determines what fraction of the actual slice comes from
137  *		the slice size computed from the sleep time.
138  */
139 #define	SCHED_SLICE_MIN		(hz / 100)
140 #define	SCHED_SLICE_MAX		(hz / 4)
141 #define	SCHED_SLICE_RANGE	(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
142 #define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
143 #define	SCHED_PRI_TOSLICE(pri)						\
144     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((pri), SCHED_PRI_RANGE))
145 #define	SCHED_SLP_TOSLICE(slp)						\
146     (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((slp), SCHED_SLP_MAX))
147 #define	SCHED_SLP_COMP(slice)	(((slice) / 5) * 3)	/* 60% */
148 #define	SCHED_PRI_COMP(slice)	(((slice) / 5) * 2)	/* 40% */
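
/*
 * Illustrative example (hypothetical values): with hz = 1000,
 * SCHED_SLICE_MIN == 10, SCHED_SLICE_MAX == 250 and SCHED_SLICE_RANGE
 * == 241.  A kseg with slp = 1000 ticks of sleep (half of SCHED_SLP_MAX)
 * gets SCHED_SLP_TOSLICE(1000) == 250 - 120 == 130, of which
 * SCHED_SLP_COMP(130) == 78 ticks count towards the final slice; the
 * rest comes from SCHED_PRI_COMP() of the priority derived slice.
 */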
149 
150 /*
151  * This macro determines whether or not the kse belongs on the current or
152  * next run queue.
153  */
154 #define	SCHED_CURR(kg)	((kg)->kg_slptime > (hz / 4) || \
155     (kg)->kg_pri_class != PRI_TIMESHARE)
156 
157 /*
158  * Cpu percentage computation macros and defines.
159  *
160  * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
161  * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
162  */
163 
164 #define	SCHED_CPU_TIME	60
165 #define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
166 
167 /*
168  * kseq - pair of runqs per processor
169  */
170 
171 struct kseq {
172 	struct runq	ksq_runqs[2];
173 	struct runq	*ksq_curr;
174 	struct runq	*ksq_next;
175 	int		ksq_load;	/* Total runnable */
176 };
177 
178 /*
179  * One kse queue per processor.
180  */
181 #ifdef SMP
182 struct kseq	kseq_cpu[MAXCPU];
183 #define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
184 #define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
185 #else
186 struct kseq	kseq_cpu;
187 #define	KSEQ_SELF()	(&kseq_cpu)
188 #define	KSEQ_CPU(x)	(&kseq_cpu)
189 #endif
190 
191 static int sched_slice(struct ksegrp *kg);
192 static int sched_priority(struct ksegrp *kg);
193 void sched_pctcpu_update(struct kse *ke);
194 int sched_pickcpu(void);
195 
196 static struct kse * kseq_choose(struct kseq *kseq);
197 static void kseq_setup(struct kseq *kseq);
198 
199 static void
200 kseq_setup(struct kseq *kseq)
201 {
202 	kseq->ksq_load = 0;
203 	kseq->ksq_curr = &kseq->ksq_runqs[0];
204 	kseq->ksq_next = &kseq->ksq_runqs[1];
205 	runq_init(kseq->ksq_curr);
206 	runq_init(kseq->ksq_next);
207 }
208 
209 static void
210 sched_setup(void *dummy)
211 {
212 	int i;
213 
214 	mtx_lock_spin(&sched_lock);
215 	/* init kseqs */
216 	for (i = 0; i < MAXCPU; i++)
217 		kseq_setup(KSEQ_CPU(i));
218 	mtx_unlock_spin(&sched_lock);
219 }
220 
221 /*
222  * Scale the scheduling priority according to the "interactivity" of this
223  * process.
224  */
225 static int
226 sched_priority(struct ksegrp *kg)
227 {
228 	int pri;
229 
230 	if (kg->kg_pri_class != PRI_TIMESHARE)
231 		return (kg->kg_user_pri);
232 
233 	pri = SCHED_SLP_TOPRI(kg->kg_slptime);
234 	CTR2(KTR_RUNQ, "sched_priority: slptime: %d\tpri: %d",
235 	    kg->kg_slptime, pri);
236 
237 	pri += PRI_MIN_TIMESHARE;
238 	pri += kg->kg_nice;
239 
240 	if (pri > PRI_MAX_TIMESHARE)
241 		pri = PRI_MAX_TIMESHARE;
242 	else if (pri < PRI_MIN_TIMESHARE)
243 		pri = PRI_MIN_TIMESHARE;
244 
245 	kg->kg_user_pri = pri;
246 
247 	return (kg->kg_user_pri);
248 }
249 
250 /*
251  * Calculate a time slice based on the process priority.
252  */
253 static int
254 sched_slice(struct ksegrp *kg)
255 {
256 	int pslice;
257 	int sslice;
258 	int slice;
259 	int pri;
260 
261 	pri = kg->kg_user_pri;
262 	pri -= PRI_MIN_TIMESHARE;
263 	pslice = SCHED_PRI_TOSLICE(pri);
264 	sslice = SCHED_SLP_TOSLICE(kg->kg_slptime);
265 	slice = SCHED_SLP_COMP(sslice) + SCHED_PRI_COMP(pslice);
266 	kg->kg_slptime = SCHED_SLP_DECAY(kg->kg_slptime);
267 
268 	CTR4(KTR_RUNQ,
269 	    "sched_slice: pri: %d\tsslice: %d\tpslice: %d\tslice: %d",
270 	    pri, sslice, pslice, slice);
271 
272 	if (slice < SCHED_SLICE_MIN)
273 		slice = SCHED_SLICE_MIN;
274 	else if (slice > SCHED_SLICE_MAX)
275 		slice = SCHED_SLICE_MAX;
276 
277 	return (slice);
278 }
279 
280 int
281 sched_rr_interval(void)
282 {
283 	return (SCHED_SLICE_MAX);
284 }
285 
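/*
 * Rescale the accumulated tick count so that it covers exactly
 * SCHED_CPU_TICKS of history and advance the first/last tick
 * watermarks to the present.
 */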
286 void
287 sched_pctcpu_update(struct kse *ke)
288 {
289 	/*
290 	 * Adjust counters and watermark for pctcpu calc.
291 	 */
292 	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
293 		    SCHED_CPU_TICKS;
294 	ke->ke_ltick = ticks;
295 	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
296 }
297 
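/*
 * Pick the cpu with the least loaded kseq for a new kse.  Cpu zero is
 * returned until SMP has started; the UP version below is a stub.
 */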
298 #ifdef SMP
299 int
300 sched_pickcpu(void)
301 {
302 	struct kseq *kseq;
303 	int load;
304 	int cpu;
305 	int i;
306 
307 	if (!smp_started)
308 		return (0);
309 
310 	load = KSEQ_SELF()->ksq_load;
311 	cpu = PCPU_GET(cpuid);
312 
313 	for (i = 0; i < mp_maxid; i++) {
314 		if (CPU_ABSENT(i))
315 			continue;
316 		kseq = KSEQ_CPU(i);
317 		if (kseq->ksq_load < load) {
318 			cpu = i;
319 			load = kseq->ksq_load;
320 		}
321 	}
322 
323 	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
324 	return (cpu);
325 }
326 #else
327 int
328 sched_pickcpu(void)
329 {
330 	return (0);
331 }
332 #endif
333 
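/*
 * Adjust the priority of a thread.  If the thread is on a run queue,
 * remove and re-add its kse so that it is queued at the new priority.
 */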
334 void
335 sched_prio(struct thread *td, u_char prio)
336 {
337 	struct kse *ke;
338 	struct runq *rq;
339 
340 	mtx_assert(&sched_lock, MA_OWNED);
341 	ke = td->td_kse;
342 	td->td_priority = prio;
343 
344 	if (TD_ON_RUNQ(td)) {
345 		rq = ke->ke_runq;
346 
347 		runq_remove(rq, ke);
348 		runq_add(rq, ke);
349 	}
350 }
351 
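/*
 * The current thread is switching out.  Remember where it last ran,
 * clear the reschedule request, and either put it back on a run queue
 * if it is still runnable or, for threaded processes, hand its kse
 * back for reassignment.
 */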
352 void
353 sched_switchout(struct thread *td)
354 {
355 	struct kse *ke;
356 
357 	mtx_assert(&sched_lock, MA_OWNED);
358 
359 	ke = td->td_kse;
360 
361 	td->td_last_kse = ke;
362 	td->td_lastcpu = ke->ke_oncpu;
363 	ke->ke_oncpu = NOCPU;
364 	ke->ke_flags &= ~KEF_NEEDRESCHED;
365 
366 	if (TD_IS_RUNNING(td)) {
367 		setrunqueue(td);
368 		return;
369 	} else
370 		td->td_kse->ke_runq = NULL;
371 
372 	/*
373 	 * We will not be on the run queue, so we must be
374 	 * sleeping or similar.
375 	 */
376 	if (td->td_proc->p_flag & P_KSES)
377 		kse_reassign(ke);
378 }
379 
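/*
 * The chosen thread is switching in.  Note which cpu it now runs on
 * and request a reschedule if its kernel priority no longer matches
 * its user priority.
 */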
380 void
381 sched_switchin(struct thread *td)
382 {
383 	/* struct kse *ke = td->td_kse; */
384 	mtx_assert(&sched_lock, MA_OWNED);
385 
386 	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
387 	if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
388 	    td->td_priority != td->td_ksegrp->kg_user_pri)
389 		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
390 }
391 
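/*
 * Update the ksegrp's nice value, recompute its user priority and flag
 * each of its threads for rescheduling.
 */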
392 void
393 sched_nice(struct ksegrp *kg, int nice)
394 {
395 	struct thread *td;
396 
397 	kg->kg_nice = nice;
398 	sched_priority(kg);
399 	FOREACH_THREAD_IN_GROUP(kg, td) {
400 		td->td_kse->ke_flags |= KEF_NEEDRESCHED;
401 	}
402 }
403 
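/*
 * A thread is going to sleep.  Record the time so that sched_wakeup()
 * can credit the sleep to the ksegrp, and set the sleeping priority.
 */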
404 void
405 sched_sleep(struct thread *td, u_char prio)
406 {
407 	mtx_assert(&sched_lock, MA_OWNED);
408 
409 	td->td_slptime = ticks;
410 	td->td_priority = prio;
411 
412 	/*
413 	 * If this is an interactive task, clear its queue so it moves back
414 	 * onto curr when it wakes up.  Otherwise let it stay on the queue
415 	 * that it was assigned to.
416 	 */
417 	if (SCHED_CURR(td->td_kse->ke_ksegrp))
418 		td->td_kse->ke_runq = NULL;
419 #if 0
420 	if (td->td_priority < PZERO)
421 		kseq_cpu[td->td_kse->ke_cpu].ksq_load++;
422 #endif
423 }
424 
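/*
 * A thread is waking up.  Charge the time it slept to the ksegrp's
 * interactivity accounting, recompute its priority, put it back on a
 * run queue and request a reschedule if it now beats curthread.
 */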
425 void
426 sched_wakeup(struct thread *td)
427 {
428 	struct ksegrp *kg;
429 
430 	mtx_assert(&sched_lock, MA_OWNED);
431 
432 	/*
433 	 * Let the kseg know how long we slept for.  This is because process
434 	 * interactivity behavior is modeled in the kseg.
435 	 */
436 	kg = td->td_ksegrp;
437 
438 	if (td->td_slptime) {
439 		kg->kg_slptime += ticks - td->td_slptime;
440 		if (kg->kg_slptime > SCHED_SLP_MAX)
441 			kg->kg_slptime = SCHED_SLP_MAX;
442 		td->td_priority = sched_priority(kg);
443 	}
444 	td->td_slptime = 0;
445 #if 0
446 	if (td->td_priority < PZERO)
447 		kseq_cpu[td->td_kse->ke_cpu].ksq_load--;
448 #endif
449 	setrunqueue(td);
450 	if (td->td_priority < curthread->td_priority)
451 		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
452 }
453 
454 /*
455  * Penalize the parent for creating a new child and initialize the child's
456  * priority.
457  */
458 void
459 sched_fork(struct ksegrp *kg, struct ksegrp *child)
460 {
461 	struct kse *ckse;
462 	struct kse *pkse;
463 
464 	mtx_assert(&sched_lock, MA_OWNED);
465 	ckse = FIRST_KSE_IN_KSEGRP(child);
466 	pkse = FIRST_KSE_IN_KSEGRP(kg);
467 
468 	/* XXX Need something better here */
469 	child->kg_slptime = kg->kg_slptime;
470 	child->kg_user_pri = kg->kg_user_pri;
471 
472 	if (pkse->ke_cpu != PCPU_GET(cpuid)) {
473 		printf("pkse->ke_cpu = %d\n", pkse->ke_cpu);
474 		printf("cpuid = %d", PCPU_GET(cpuid));
475 		Debugger("stop");
476 	}
477 
478 	ckse->ke_slice = pkse->ke_slice;
479 	ckse->ke_cpu = pkse->ke_cpu; /* sched_pickcpu(); */
480 	ckse->ke_runq = NULL;
481 	/*
482 	 * Claim that we've been running for one second for statistical
483 	 * purposes.
484 	 */
485 	ckse->ke_ticks = 0;
486 	ckse->ke_ltick = ticks;
487 	ckse->ke_ftick = ticks - hz;
488 }
489 
490 /*
491  * Return some of the child's priority and interactivity to the parent.
492  */
493 void
494 sched_exit(struct ksegrp *kg, struct ksegrp *child)
495 {
496 	/* XXX Need something better here */
497 	mtx_assert(&sched_lock, MA_OWNED);
498 	kg->kg_slptime = child->kg_slptime;
499 	sched_priority(kg);
500 }
501 
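/*
 * Called once per scheduler clock tick for the running thread: account
 * cpu time for pctcpu, check for a higher priority kse on our current
 * queue, decay the ksegrp's sleep time and charge the tick against the
 * slice, rescheduling once the slice is used up.
 */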
502 void
503 sched_clock(struct thread *td)
504 {
505 	struct kse *ke;
506 	struct kse *nke;
507 	struct kseq *kseq;
508 	struct ksegrp *kg;
509 
510 
511 	mtx_assert(&sched_lock, MA_OWNED);
512 	KASSERT((td != NULL), ("schedclock: null thread pointer"));
513 
514 	ke = td->td_kse;
515 	kg = td->td_ksegrp;
516 
517 	/* Adjust ticks for pctcpu */
518 	ke->ke_ticks += 10000;
519 	ke->ke_ltick = ticks;
520 	/* Go up to one second beyond our max and then trim back down */
521 	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
522 		sched_pctcpu_update(ke);
523 
524 	if (td->td_kse->ke_flags & KEF_IDLEKSE)
525 		return;
526 
527 	/*
528 	 * Check for a higher priority task on the run queue.  This can happen
529 	 * on SMP if another processor woke up a process on our runq.
530 	 */
531 	kseq = KSEQ_SELF();
532 	nke = runq_choose(kseq->ksq_curr);
533 
534 	if (nke && nke->ke_thread &&
535 	    nke->ke_thread->td_priority < td->td_priority)
536 		ke->ke_flags |= KEF_NEEDRESCHED;
537 	/*
538 	 * We used a tick, decrease our total sleep time.  This decreases our
539 	 * "interactivity".
540 	 */
541 	if (kg->kg_slptime)
542 		kg->kg_slptime--;
543 	/*
544 	 * We used up one time slice.
545 	 */
546 	ke->ke_slice--;
547 	/*
548 	 * We're out of time; recompute priorities and requeue.
549 	 */
550 	if (ke->ke_slice == 0) {
551 		td->td_priority = sched_priority(kg);
552 		ke->ke_slice = sched_slice(kg);
553 		ke->ke_flags |= KEF_NEEDRESCHED;
554 		ke->ke_runq = NULL;
555 	}
556 }
557 
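/*
 * Return non-zero if any kseq has runnable work.  On SMP a remote
 * kseq counts too, since sched_choose() may steal from it.
 */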
558 int
559 sched_runnable(void)
560 {
561 	struct kseq *kseq;
562 
563 	kseq = KSEQ_SELF();
564 
565 	if (kseq->ksq_load)
566 		return (1);
567 #ifdef SMP
568 	/*
569 	 * For SMP we may steal another processor's KSEs.  Just search until we
570 	 * verify that at least one other cpu has a runnable task.
571 	 */
572 	if (smp_started) {
573 		int i;
574 
575 		for (i = 0; i < mp_maxid; i++) {
576 			if (CPU_ABSENT(i))
577 				continue;
578 			kseq = KSEQ_CPU(i);
579 			if (kseq->ksq_load)
580 				return (1);
581 		}
582 	}
583 #endif
584 	return (0);
585 }
586 
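/*
 * Returning to user mode; shed any priority boost acquired in the
 * kernel and go back to the ksegrp's user priority.
 */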
587 void
588 sched_userret(struct thread *td)
589 {
590 	struct ksegrp *kg;
591 
592 	kg = td->td_ksegrp;
593 
594 	if (td->td_priority != kg->kg_user_pri) {
595 		mtx_lock_spin(&sched_lock);
596 		td->td_priority = kg->kg_user_pri;
597 		mtx_unlock_spin(&sched_lock);
598 	}
599 }
600 
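/*
 * Pick the best kse from the kseq's current run queue, swapping in the
 * next queue when the current one is empty.
 */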
601 struct kse *
602 kseq_choose(struct kseq *kseq)
603 {
604 	struct kse *ke;
605 	struct runq *swap;
606 
607 	if ((ke = runq_choose(kseq->ksq_curr)) == NULL) {
608 		swap = kseq->ksq_curr;
609 		kseq->ksq_curr = kseq->ksq_next;
610 		kseq->ksq_next = swap;
611 		ke = runq_choose(kseq->ksq_curr);
612 	}
613 
614 	return (ke);
615 }
616 
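/*
 * Select the next kse to run on this cpu.  If our own queues are empty
 * and SMP is running, steal one from the most loaded remote kseq.
 */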
617 struct kse *
618 sched_choose(void)
619 {
620 	struct kseq *kseq;
621 	struct kse *ke;
622 
623 	kseq = KSEQ_SELF();
624 	ke = kseq_choose(kseq);
625 
626 	if (ke) {
627 		runq_remove(ke->ke_runq, ke);
628 		kseq->ksq_load--;
629 		ke->ke_state = KES_THREAD;
630 	}
631 
632 #ifdef SMP
633 	if (ke == NULL && smp_started) {
634 		int load;
635 		int cpu;
636 		int i;
637 
638 		load = 0;
639 		cpu = 0;
640 
641 		/*
642 		 * Find the cpu with the highest load and steal one proc.
643 		 */
644 		for (i = 0; i < mp_maxid; i++) {
645 			if (CPU_ABSENT(i))
646 				continue;
647 			kseq = KSEQ_CPU(i);
648 			if (kseq->ksq_load > load) {
649 				load = kseq->ksq_load;
650 				cpu = i;
651 			}
652 		}
653 		if (load) {
654 			kseq = KSEQ_CPU(cpu);
655 			ke = kseq_choose(kseq);
656 			kseq->ksq_load--;
657 			ke->ke_state = KES_THREAD;
658 			runq_remove(ke->ke_runq, ke);
659 			ke->ke_runq = NULL;
660 			ke->ke_cpu = PCPU_GET(cpuid);
661 		}
662 
663 	}
664 #endif
665 	return (ke);
666 }
667 
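/*
 * Place a kse on a run queue.  If it does not already have a queue
 * assigned, interactive and non-timeshare ksegs go on the current
 * queue and the rest on the next queue; the kseq load is bumped either
 * way.
 */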
668 void
669 sched_add(struct kse *ke)
670 {
671 
672 	mtx_assert(&sched_lock, MA_OWNED);
673 	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
674 	KASSERT((ke->ke_thread->td_kse != NULL),
675 	    ("runq_add: No KSE on thread"));
676 	KASSERT(ke->ke_state != KES_ONRUNQ,
677 	    ("runq_add: kse %p (%s) already in run queue", ke,
678 	    ke->ke_proc->p_comm));
679 	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
680 	    ("runq_add: process swapped out"));
681 
682 
683 	if (ke->ke_runq == NULL) {
684 		struct kseq *kseq;
685 
686 		kseq = KSEQ_CPU(ke->ke_cpu);
687 		if (SCHED_CURR(ke->ke_ksegrp))
688 			ke->ke_runq = kseq->ksq_curr;
689 		else
690 			ke->ke_runq = kseq->ksq_next;
691 	}
692 	ke->ke_ksegrp->kg_runq_kses++;
693 	ke->ke_state = KES_ONRUNQ;
694 
695 	runq_add(ke->ke_runq, ke);
696 	KSEQ_CPU(ke->ke_cpu)->ksq_load++;
697 }
698 
699 void
700 sched_rem(struct kse *ke)
701 {
702 	mtx_assert(&sched_lock, MA_OWNED);
703 	/* KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); */
704 
705 	runq_remove(ke->ke_runq, ke);
706 	ke->ke_runq = NULL;
707 	ke->ke_state = KES_THREAD;
708 	ke->ke_ksegrp->kg_runq_kses--;
709 	KSEQ_CPU(ke->ke_cpu)->ksq_load--;
710 }
711 
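/*
 * Compute the %cpu consumed by a kse as a fixed point number, derived
 * from the ticks it accumulated over the last SCHED_CPU_TIME seconds.
 */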
712 fixpt_t
713 sched_pctcpu(struct kse *ke)
714 {
715 	fixpt_t pctcpu;
716 	int realstathz;
717 
718 	pctcpu = 0;
719 	realstathz = stathz ? stathz : hz;
720 
721 	if (ke->ke_ticks) {
722 		int rtick;
723 
724 		/* Update to account for time potentially spent sleeping */
725 		ke->ke_ltick = ticks;
726 		sched_pctcpu_update(ke);
727 
728 		/* How many rticks per second? */
729 		rtick = ke->ke_ticks / (SCHED_CPU_TIME * 10000);
730 		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
731 	}
732 
733 	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
734 
735 	return (pctcpu);
736 }
737 
738 int
739 sched_sizeof_kse(void)
740 {
741 	return (sizeof(struct kse) + sizeof(struct ke_sched));
742 }
743 
744 int
745 sched_sizeof_ksegrp(void)
746 {
747 	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
748 }
749 
750 int
751 sched_sizeof_proc(void)
752 {
753 	return (sizeof(struct proc));
754 }
755 
756 int
757 sched_sizeof_thread(void)
758 {
759 	return (sizeof(struct thread) + sizeof(struct td_sched));
760 }
761