/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
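
/*
 * Worked example (a sketch, assuming the stock values PRIO_MIN == -20 and
 * PRIO_MAX == 20 from <sys/resource.h>, and RQ_PPQ == 4 from <sys/runq.h>):
 *
 *	ESTCPULIM(e) = min(e, 8 * (1 * 40 - 4) + 8 - 1) = min(e, 295)
 *
 * so estcpu saturates at 295.  Divided by INVERSE_ESTCPU_WEIGHT that
 * contributes at most 36 priority steps, RQ_PPQ fewer than the 40 steps
 * spanned by the full nice range.
 */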

struct ke_sched {
	int	ske_cpticks;	/* (j) Ticks of cpu time. */
};

static struct ke_sched ke_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = NULL;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = NULL;

static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

static void	roundrobin(void *arg);
static void	schedcpu(void *arg);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct ksegrp *kg);
static void	resetpriority(struct ksegrp *kg);

SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;
SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I",
	"Roundrobin scheduling quantum in microseconds");
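
/*
 * Illustration (a sketch, assuming hz == 100 so tick == 10000 microseconds):
 * the handler above exports the quantum in microseconds.  The default
 * SCHED_QUANTUM == hz / 10 == 10 ticks reads back as 100000 us, and
 * "sysctl kern.quantum=200000" would store 200000 / 10000 == 20 ticks in
 * sched_quantum and set hogticks to 40.
 */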

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority && curthread->td_kse)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Force a switch among equal-priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again, which is equivalent to a preemption; thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that sched_clock() updates kg_estcpu and the per-kse tick count
 * asynchronously; the derivation below uses the historical names p_estcpu
 * and p_cpticks.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
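
/*
 * Numerical check (a sketch, not part of the original derivation): at
 * loadavg == 2, b == 4 and decay == 4/5.  Then
 *
 *	power = -2.30 / ln(4/5) = 2.30 / 0.223 =~ 10.32
 *
 * which matches the "power: 10.32" entry in the table above, and
 * (4/5) ** 10.32 =~ .1 as required.
 */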

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
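
/*
 * Fixed-point note (a sketch): ldavg[] entries are scaled by FSCALE, so
 * for a load average of 1.0, loadfactor() yields 2 * FSCALE and decay_cpu()
 * multiplies by 2 * FSCALE / (2 * FSCALE + FSCALE) == 2/3, i.e. b/(b+1)
 * with b == 2, exactly the decay factor derived above.
 */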

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0488; in fixed point with 11 fraction bits this
 *	is 0.0488 * 2048 ~= 100, which cancels a 100 Hz statclock and leaves
 *	a per-tick factor of 1 (hence the *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
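
/*
 * Worked example (a sketch, assuming FSHIFT == 11 so FSCALE == 2048, and a
 * statclock of 100 Hz): ccpu == 0.9512 * 2048 =~ 1948, so FSCALE - ccpu ==
 * 100 and the general formula in schedcpu() becomes
 *
 *	(100 * (cpticks * 2048 / 100)) >> 11 == cpticks
 *
 * which is exactly the fast path cpticks << (FSHIFT - CCPU_SHIFT), the
 * shift count being 0.  After 60 idle seconds, pctcpu has been multiplied
 * by ccpu ** 60 == exp(-3) =~ 0.05, the advertised 95% decay.
 */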

/*
 * Recompute process priorities, once per second (every hz ticks).
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int realstathz;
	int awake;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		mtx_lock_spin(&sched_lock);
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment time in/out of memory and sleep
				 * time (if sleeping).  We ignore overflow;
				 * with 16-bit ints (remember them?)
				 * overflow takes 45 days.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				} else if ((ke->ke_state == KES_THREAD) &&
				    (TD_IS_RUNNING(ke->ke_thread))) {
					awake = 1;
					/* Do not clear KEF_DIDRUN */
				} else if (ke->ke_flags & KEF_DIDRUN) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				}

				/*
				 * pctcpu is only for ps?
				 * Do it per kse... and add them up at the
				 * end?  XXXKSE
				 */
				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
				    FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_sched->ske_cpticks == 0)
					continue;
#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ke->ke_sched->ske_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_sched->ske_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_sched->ske_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ke->ke_sched->ske_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake) {
				if (kg->kg_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(kg);
				}
				kg->kg_slptime = 0;
			} else {
				kg->kg_slptime++;
			}
			if (kg->kg_slptime > 1)
				continue;
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				if (td->td_priority >= PUSER) {
					sched_prio(td, kg->kg_user_pri);
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}
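
/*
 * Trace of the pctcpu update (a sketch, assuming stathz == 100 and
 * FSHIFT == 11): a kse that ran the entire second arrives with
 * ske_cpticks == 100, so each pass multiplies ke_pctcpu by ccpu (0.9512)
 * and adds 100/2048ths.  The fixed point of x = 0.9512 * x + 100 is
 * x == 2048 == FSCALE, i.e. pctcpu converges to 100% for a fully busy
 * kse and decays toward 0 once it goes idle.
 */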

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
	register unsigned int newcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	newcpu = kg->kg_estcpu;
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		kg->kg_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(kg);
}
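
/*
 * Worked example for the comment above (a sketch, assuming a load average
 * of 1.0 so the per-second decay factor is 2/3): an estcpu of 255 needs
 * about ln(255) / ln(3/2) =~ 13.6 decays to drop below 1, roughly six
 * times loadfactor(1) == 2 with the load average read as a plain integer.
 */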

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;
	struct thread *td;

	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);			/* XXXKSE silly */
	}
}
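
/*
 * Worked example (a sketch, assuming PRIO_MIN == -20): with kg_estcpu == 40
 * and kg_nice == 0,
 *
 *	newpriority = PUSER + 40 / 8 + 1 * (0 - (-20)) = PUSER + 25
 *
 * so a nice value of 0 already costs 20 steps and every additional
 * INVERSE_ESTCPU_WEIGHT (8) units of estcpu cost one more, clamped to
 * [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].
 */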

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&schedcpu_callout, 1);
	callout_init(&roundrobin_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
}

/* External interfaces start here */
int
sched_runnable(void)
{
	return (runq_check(&runq));
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
sched_clock(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	td = ke->ke_thread;

	ke->ke_sched->ske_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}
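
/*
 * Ramp-up illustration (a sketch, assuming stathz == 128, a common value):
 * sched_clock() runs once per statclock tick, so a CPU-bound thread gains
 * one estcpu per tick and is re-prioritized every INVERSE_ESTCPU_WEIGHT ==
 * 8 ticks, i.e. its user priority degrades one step every 8/128 =~ 62 ms
 * until ESTCPULIM caps the estimate.
 */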

/*
 * Charge the child's scheduling CPU usage to the parent.
 *
 * XXXKSE: assume only one thread, kse, and ksegrp; keep estcpu in each
 * ksegrp.  Charge it to the ksegrp that did the wait; since process estcpu
 * is the sum over all ksegrps, this is strictly as expected.  Assume that
 * the child process aggregated all its estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct proc *p, struct proc *p1)
{
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_fork(struct proc *p, struct proc *p1)
{
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{
	child->ke_sched->ske_cpticks = 0;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	child->kg_estcpu = kg->kg_estcpu;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_nice(struct ksegrp *kg, int nice)
{

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_nice = nice;
	resetpriority(kg);
}

void
sched_class(struct ksegrp *kg, int class)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kg->kg_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
void
sched_prio(struct thread *td, u_char prio)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

void
sched_sleep(struct thread *td, u_char prio)
{

	mtx_assert(&sched_lock, MA_OWNED);
	td->td_ksegrp->kg_slptime = 0;
	td->td_priority = prio;
}

void
sched_switchin(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;
	struct proc *p;

	ke = td->td_kse;
	p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));

	td->td_lastcpu = td->td_oncpu;
	td->td_last_kse = ke;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue, as it has not been suspended
	 * or stopped or anything else similar.
	 */
	if (TD_IS_RUNNING(td)) {
		/* Put us back on the run queue (kse and all). */
		setrunqueue(td);
	} else if (p->p_flag & P_SA) {
		/*
		 * We will not be on the run queue, so we must be
		 * sleeping or similar.  As it's available,
		 * someone else can use the KSE if they need it.
		 */
		kse_reassign(ke);
	}
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = td->td_ksegrp;
	if (kg->kg_slptime > 1)
		updatepri(kg);
	kg->kg_slptime = 0;
	setrunqueue(td);
	maybe_resched(td);
}

void
sched_add(struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("runq_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("runq_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_add: process swapped out"));
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(&runq, ke);
}

void
sched_rem(struct kse *ke)
{
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);

	runq_remove(&runq, ke);
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
}

struct kse *
sched_choose(void)
{
	struct kse *ke;

	ke = runq_choose(&runq);

	if (ke != NULL) {
		runq_remove(&runq, ke);
		ke->ke_state = KES_THREAD;

		KASSERT((ke->ke_thread != NULL),
		    ("runq_choose: No thread on KSE"));
		KASSERT((ke->ke_thread->td_kse != NULL),
		    ("runq_choose: No KSE on thread"));
		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("runq_choose: process swapped out"));
	}
	return (ke);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread));
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
	return (ke->ke_pctcpu);
}
716