xref: /freebsd/sys/kern/sched_4bsd.c (revision 747ca5f52192617ade3a33956f61380c684b74b8)
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
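
/*
 * Worked example (illustrative; assumes the stock PRIO_MIN/PRIO_MAX of
 * -20/20 and the RQ_PPQ of 4 from <sys/runq.h>): on UP the clamp above
 * works out to
 *
 *	ESTCPULIM(e) = min((e), 8 * (1 * 40 - 4) + 8 - 1) = min((e), 295)
 *
 * so the kg_estcpu term in resetpriority() contributes at most
 * 295 / 8 = 36 priority steps, one run queue short of the full nice span.
 */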

struct ke_sched {
	int	ske_cpticks;	/* (j) Ticks of cpu time. */
};

static struct ke_sched ke_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = NULL;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = NULL;

static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout roundrobin_callout;

static void	roundrobin(void *arg);
static void	schedcpu(void);
static void	schedcpu_thread(void *dummy);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct ksegrp *kg);
static void	resetpriority(struct ksegrp *kg);

SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;
SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I",
	"Roundrobin scheduling quantum in microseconds");
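
/*
 * Usage example (illustrative; assumes the traditional default hz of
 * 100, so tick is 10000 microseconds): "sysctl kern.quantum=200000"
 * would set sched_quantum to 200000 / 10000 = 20 ticks and hogticks to
 * 40 ticks; requests below one tick fail with EINVAL in the handler
 * above.
 */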

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority && curthread->td_kse)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}
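
/*
 * Illustrative timing: with the default SCHED_QUANTUM of hz / 10 the
 * callout above refires every 100ms of wall clock regardless of hz,
 * which is the "every 100ms" promise in the comment above.
 */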

/*
 * Constants for digital decay and forget:
 *	90% of (kg_estcpu) usage in 5 * loadav time
 *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		kg_estcpu *= decay;
 * will compute
 * 	kg_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
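
/*
 * Worked example (illustrative): for a load average of 1, ldavg[0] is
 * FSCALE, so loadfactor() yields 2 * FSCALE and decay_cpu() scales by
 * 2 * FSCALE / (2 * FSCALE + FSCALE) = 2/3.  Five applications leave
 * (2/3)**5 =~ 0.13 of the original kg_estcpu, close to the 10% target;
 * the power table above gives the exact exponent (5.68) needed.
 */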

/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
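
/*
 * Worked sanity check (illustrative; assumes the usual FSHIFT of 11,
 * so FSCALE == 2048): a thread running flat out with stathz == 100
 * accumulates 100 cpticks a second, so schedcpu() below iterates
 * p = p * ccpu / FSCALE + 100, which converges on p =~ 2050 =~ FSCALE,
 * i.e. 100% once a consumer divides by FSCALE.
 */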

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Prevent state changes and protect run queue.
		 */
		mtx_lock_spin(&sched_lock);
		/*
		 * Increment time in/out of memory.  We ignore overflow; with
		 * 16-bit int's (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment sleep time (if sleeping).  We
				 * ignore overflow, as above.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				} else if ((ke->ke_state == KES_THREAD) &&
				    (TD_IS_RUNNING(ke->ke_thread))) {
					awake = 1;
					/* Do not clear KEF_DIDRUN */
				} else if (ke->ke_flags & KEF_DIDRUN) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				}

				/*
				 * ke_pctcpu is only for ps and ttyinfo().
				 * Do it per kse, and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
				    FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_sched->ske_cpticks == 0)
					continue;
#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ke->ke_sched->ske_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_sched->ske_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_sched->ske_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ke->ke_sched->ske_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake) {
				if (kg->kg_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(kg);
				}
				kg->kg_slptime = 0;
			} else
				kg->kg_slptime++;
			if (kg->kg_slptime > 1)
				continue;
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				if (td->td_priority >= PUSER) {
					sched_prio(td, kg->kg_user_pri);
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void *dummy)
{
	int nowake;

	for (;;) {
		schedcpu();
		tsleep(&nowake, curthread->td_priority, "-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay kg_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
	register fixpt_t loadfac;
	register unsigned int newcpu;

	loadfac = loadfactor(averunnable.ldavg[0]);
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		newcpu = kg->kg_estcpu;
		kg->kg_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(kg);
}
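
/*
 * Worked example (illustrative): with a load average of 1 the decay
 * factor is 2/3, so the maximum kg_estcpu of 255 truncates to zero
 * after 13 steps (255, 170, 113, 75, 50, 33, 22, 14, 9, 6, 4, 2, 1, 0),
 * in line with the "six times the loadfactor" bound quoted above.
 */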

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;
	struct thread *td;

	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);			/* XXXKSE silly */
	}
}
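
/*
 * Worked example (illustrative; assumes the stock PRIO_MIN of -20 and
 * the NICE_WEIGHT of 1 above): a nice-0 ksegrp with kg_estcpu == 80
 * computes, on UP,
 *
 *	newpriority = PUSER + 80 / 8 + 1 * (0 - (-20)) = PUSER + 30
 *
 * before the clamp to the timeshare range is applied.
 */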

/* ARGSUSED */
static void
sched_setup(void *dummy)
{

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&roundrobin_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);

	/* Kick off schedcpu kernel process. */
	kthread_create(schedcpu_thread, NULL, NULL, 0, 0, "schedcpu");
}

/* External interfaces start here */
int
sched_runnable(void)
{
	return (runq_check(&runq));
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (kg_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time kg_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	ke = td->td_kse;

	ke->ke_sched->ske_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}
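
/*
 * Illustrative cadence (assumes the usual stathz of 128): sched_clock()
 * runs on every statclock tick, so on UP a continuously running thread
 * crosses an INVERSE_ESTCPU_WEIGHT (8) boundary and has its priority
 * recomputed about 16 times a second, drifting one step worse each time
 * until ESTCPULIM caps kg_estcpu.
 */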

/*
 * Charge the child's scheduling CPU usage to the parent.
 *
 * XXXKSE assume only one thread, kse and ksegrp keep estcpu in each ksegrp.
 * Charge it to the ksegrp that did the wait; since process estcpu is the
 * sum over all ksegrps, this is strictly as expected.  Assume that the
 * child process aggregated all the estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct proc *p, struct proc *p1)
{
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_fork(struct proc *p, struct proc *p1)
{
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{
	child->ke_sched->ske_cpticks = 0;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	child->kg_estcpu = kg->kg_estcpu;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_nice(struct ksegrp *kg, int nice)
{

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_nice = nice;
	resetpriority(kg);
}

void
sched_class(struct ksegrp *kg, int class)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
void
sched_prio(struct thread *td, u_char prio)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

void
sched_sleep(struct thread *td, u_char prio)
{

	mtx_assert(&sched_lock, MA_OWNED);
	td->td_ksegrp->kg_slptime = 0;
	td->td_priority = prio;
}

void
sched_switch(struct thread *td)
{
	struct thread *newtd;
	struct kse *ke;
	struct proc *p;

	ke = td->td_kse;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));

	td->td_lastcpu = td->td_oncpu;
	td->td_last_kse = ke;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or any thing else similar.
	 */
	if (TD_IS_RUNNING(td)) {
		/* Put us back on the run queue (kse and all). */
		setrunqueue(td);
	} else if (p->p_flag & P_SA) {
		/*
		 * We will not be on the run queue. So we must be
		 * sleeping or similar. As it's available,
		 * someone else can use the KSE if they need it.
		 */
		kse_reassign(ke);
	}
	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	if (kg->kg_slptime > 1)
		updatepri(kg);
	kg->kg_slptime = 0;
	setrunqueue(td);
	maybe_resched(td);
}

void
sched_add(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("runq_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("runq_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_add: process swapped out"));
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(&runq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);

	runq_remove(&runq, ke);
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
}

struct kse *
sched_choose(void)
{
	struct kse *ke;

	ke = runq_choose(&runq);

	if (ke != NULL) {
		runq_remove(&runq, ke);
		ke->ke_state = KES_THREAD;

		KASSERT((ke->ke_thread != NULL),
		    ("runq_choose: No thread on KSE"));
		KASSERT((ke->ke_thread->td_kse != NULL),
		    ("runq_choose: No KSE on thread"));
		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("runq_choose: process swapped out"));
	}
	return (ke);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	struct kse *ke;

	ke = td->td_kse;
	if (ke == NULL)
		ke = td->td_last_kse;
	if (ke)
		return (ke->ke_pctcpu);

	return (0);
}
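
/*
 * Example consumer (illustrative): ke_pctcpu is an FSCALE-scaled
 * fraction of a CPU, so a ps(1)-style percentage could be derived as
 *
 *	(sched_pctcpu(td) * 10000 + FSCALE / 2) >> FSHIFT
 *
 * which yields hundredths of a percent (137.9% prints as 13790).
 */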