/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */

static struct callout loadav_callout;
static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
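
/*
 * Worked example (illustrative, assuming the usual FSHIFT == 11 and
 * FSCALE == 2048 from <sys/param.h>): cexp[0] == (fixpt_t)(0.92004 *
 * 2048) == 1884.  loadav() below folds each 5-second sample in as
 *	ldavg = (cexp * ldavg + nrun * FSCALE * (FSCALE - cexp)) >> FSHIFT
 * so starting from ldavg == 0 with nrun == 1, the 1-minute average
 * becomes (1884 * 0 + 1 * 2048 * 164) >> 11 == 164, i.e. 164 / 2048
 * =~ 0.08, which matches 1 - exp(-1/12) for a single sample.
 */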

static void	endtsleep(void *);
static void	loadav(void *arg);
static void	roundrobin(void *arg);
static void	schedcpu(void *arg);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}
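
/*
 * Example (illustrative): with hz == 100, tick == 10000 microseconds
 * and the default sched_quantum == hz / 10 == 10 ticks, so
 * "sysctl kern.quantum=200000" stores 200000 / 10000 == 20 ticks and
 * sets hogticks to 40; requests smaller than one tick fail with
 * EINVAL.
 */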

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I",
	"Roundrobin scheduling quantum in microseconds");

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
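
/*
 * Worked example (illustrative, again assuming FSCALE == 2048): a load
 * average of 2.0 is stored as 2 * FSCALE == 4096, so loadfactor() gives
 * 8192 and decay_cpu() scales estcpu by 8192 / (8192 + 2048) == 0.8,
 * i.e. (2 * loadav) / (2 * loadav + 1).  Applied once per second by
 * schedcpu(), 0.8**10 =~ 0.107, so after 5 * loadav == 10 seconds about
 * 90% of the accumulated estcpu has been forgotten, as derived above.
 */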

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
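
/*
 * Worked example (illustrative): ccpu / FSCALE =~ 0.95122 == exp(-1/20),
 * and schedcpu() multiplies ke_pctcpu by it once per second, so after 60
 * idle seconds a kse retains exp(-60/20) =~ 0.05 of its %cpu; 95% has
 * decayed, as the comment above promises.  With FSHIFT == CCPU_SHIFT ==
 * 11 and realstathz == 100, the faster branch in schedcpu() degenerates
 * to ke_pctcpu += (fixpt_t)ke_cpticks << 0.
 */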

/*
 * Recompute process priorities every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int realstathz;
	int awake;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		mtx_lock_spin(&sched_lock);
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment time in/out of memory and sleep
				 * time (if sleeping).  We ignore overflow;
				 * with 16-bit int's (remember them?)
				 * overflow takes 45 days.
				 */
				/* XXXKSE */
			/*	if ((ke->ke_flags & KEF_ONRUNQ) == 0) */
				if (p->p_stat == SSLEEP || p->p_stat == SSTOP) {
					ke->ke_slptime++;
				} else {
					ke->ke_slptime = 0;
					awake = 1;
				}

				/*
				 * pctcpu is only for ps?
				 * Do it per kse.. and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >> FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_slptime > 1) {
					continue;
				}

#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100) ?
				    ((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_cpticks * FSCALE / realstathz)) >>
				    FSHIFT;
#endif
				ke->ke_cpticks = 0;
			} /* end of kse loop */
			if (awake == 0) {
				kg->kg_slptime++;
			} else {
				kg->kg_slptime = 0;
			}
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			td = FIRST_THREAD_IN_PROC(p);
			if (td->td_priority >= PUSER &&
			    (p->p_sflag & PS_INMEM)) {
				int changedqueue =
				    ((td->td_priority / RQ_PPQ) !=
				     (kg->kg_user_pri / RQ_PPQ));

				td->td_priority = kg->kg_user_pri;
				FOREACH_KSE_IN_GROUP(kg, ke) {
					if ((ke->ke_oncpu == NOCPU) &&
					    (p->p_stat == SRUN) && /* XXXKSE */
					    changedqueue) {
						remrunqueue(ke->ke_thread);
						setrunqueue(ke->ke_thread);
					}
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(register struct thread *td)
{
	register struct ksegrp *kg;
	register unsigned int newcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (td == NULL)
		return;
	kg = td->td_ksegrp;
	newcpu = kg->kg_estcpu;
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		kg->kg_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(td->td_ksegrp);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
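
/*
 * Example (illustrative): for a wait channel address such as
 * (void *)0xc0f0b3a4, LOOKUP() drops the low 8 bits and keeps the next
 * 7: (0xc0f0b3a4 >> 8) & 127 == 0xc0f0b3 & 0x7f == 0x33, so every
 * sleeper on that channel hashes to slpque[51].
 */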

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
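
/*
 * Illustrative defaults: with hz == 100, sleepinit() yields
 * sched_quantum == 10 ticks (the 100 ms round-robin interval mentioned
 * above) and hogticks == 20 ticks, i.e. two full quanta.
 */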

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */
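
#if 0
/*
 * Typical usage sketch (illustrative only, not compiled in): the caller
 * holds the mutex protecting its predicate, re-tests it in a loop, and
 * lets msleep() atomically release and reacquire the lock around the
 * sleep.  The names example_wait/flag are hypothetical.
 */
static int
example_wait(struct mtx *lock, int *flag)
{
	int error = 0;

	mtx_lock(lock);
	while (*flag == 0) {
		/* Sleep at most one second per pass; allow signals. */
		error = msleep(flag, lock, PZERO | PCATCH, "exwait", hz);
		if (error == EWOULDBLOCK)
			error = 0;	/* timed out; just re-test */
		else if (error != 0)
			break;		/* EINTR or ERESTART */
	}
	mtx_unlock(lock);
	return (error);
}
#endif
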
int
msleep(void *ident, struct mtx *mtx, int priority, const char *wmesg, int timo)
{
	struct proc *p = curproc;
	struct thread *td = curthread;
	int sig, catch = priority & PCATCH;
	int rval = 0;
	WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
	if (p && KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	WITNESS_SLEEP(0, &mtx->mtx_object);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	mtx_lock_spin(&sched_lock);
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		if (mtx != NULL && (priority & PDROP))
			mtx_unlock(mtx);
		mtx_unlock_spin(&sched_lock);
		return (0);
	}

	DROP_GIANT();

	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
		if (priority & PDROP)
			mtx = NULL;
	}

	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && td->td_proc->p_stat == SRUN, ("msleep"));

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	td->td_kse->ke_slptime = 0;	/* XXXKSE */
	td->td_ksegrp->kg_slptime = 0;
	td->td_priority = priority & PRIMASK;
	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
	    td, p->p_pid, p->p_comm, wmesg, ident);
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
	if (timo)
		callout_reset(&td->td_slpcallout, timo, endtsleep, td);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		CTR3(KTR_PROC, "msleep caught: proc %p (pid %d, %s)", p,
		    p->p_pid, p->p_comm);
		td->td_flags |= TDF_SINTR;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		sig = CURSIG(p);
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (sig != 0) {
			if (td->td_wchan != NULL)
				unsleep(td);
		} else if (td->td_wchan == NULL)
			catch = 0;
	} else
		sig = 0;
	if (td->td_wchan != NULL) {
		td->td_proc->p_stat = SSLEEP;
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
	}
	CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(td->td_proc->p_stat == SRUN, ("running but not SRUN"));
	td->td_flags &= ~TDF_SINTR;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;
	else if (timo && callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * This isn't supposed to be pretty.  If we are here, then
		 * the endtsleep() callout is currently executing on another
		 * CPU and is either spinning on the sched_lock or will be
		 * soon.  If we don't synchronize here, there is a chance
		 * that this process may msleep() again before the callout
		 * has a chance to run and the callout may end up waking up
		 * the wrong msleep().  Yuck.
		 */
		td->td_flags |= TDF_TIMEOUT;
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
	}
	mtx_unlock_spin(&sched_lock);

	if (rval == 0 && catch) {
		PROC_LOCK(p);
		/* XXX: shouldn't we always be calling CURSIG()? */
		if (sig != 0 || (sig = CURSIG(p))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				rval = EINTR;
			else
				rval = ERESTART;
		}
		PROC_UNLOCK(p);
	}
	PICKUP_GIANT();
#ifdef KTRACE
	mtx_lock(&Giant);
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
	mtx_unlock(&Giant);
#endif
	if (mtx != NULL) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}

/*
 * Implement timeout for msleep().
 *
 * If the thread hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If the process
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(void *arg)
{
	register struct thread *td = arg;

	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
	    td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	/*
	 * This is the other half of the synchronization with msleep()
	 * described above.  If the TDF_TIMEOUT flag is set, we lost the
	 * race and just need to put the thread back on the runqueue.
	 */
	if ((td->td_flags & TDF_TIMEOUT) != 0) {
		td->td_flags &= ~TDF_TIMEOUT;
		setrunqueue(td);
	} else if (td->td_wchan != NULL) {
		if (td->td_proc->p_stat == SSLEEP)  /* XXXKSE */
			setrunnable(td);
		else
			unsleep(td);
		td->td_flags |= TDF_TIMEOUT;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Remove a thread from its wait queue.
 */
void
unsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	if (td->td_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		td->td_wchan = NULL;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(register void *ident)
{
	register struct slpquehead *qp;
	register struct thread *td;
	struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	TAILQ_FOREACH(td, qp, td_slpq) {
		p = td->td_proc;
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_slpq);
			td->td_wchan = NULL;
			if (td->td_proc->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
				    td, p->p_pid, p->p_comm);
				if (td->td_ksegrp->kg_slptime > 1)
					updatepri(td);
				td->td_ksegrp->kg_slptime = 0;
				td->td_kse->ke_slptime = 0;
				td->td_proc->p_stat = SRUN;
				if (p->p_sflag & PS_INMEM) {
					setrunqueue(td);
					maybe_resched(td);
				} else {
					p->p_sflag |= PS_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(register void *ident)
{
	register struct slpquehead *qp;
	register struct thread *td;
	register struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];

	TAILQ_FOREACH(td, qp, td_slpq) {
		p = td->td_proc;
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_slpq);
			td->td_wchan = NULL;
			if (td->td_proc->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				CTR3(KTR_PROC, "wakeup1: proc %p (pid %d, %s)",
				    p, p->p_pid, p->p_comm);
				if (td->td_ksegrp->kg_slptime > 1)
					updatepri(td);
				td->td_ksegrp->kg_slptime = 0;
				td->td_kse->ke_slptime = 0;
				td->td_proc->p_stat = SRUN;
				if (p->p_sflag & PS_INMEM) {
					setrunqueue(td);
					maybe_resched(td);
					break;
				} else {
					p->p_sflag |= PS_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			}
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * The machine-independent parts of a context switch.
 */
void
mi_switch(void)
{
	struct bintime new_switchtime;
	struct thread *td = curthread;	/* XXX */
	register struct proc *p = td->td_proc;	/* XXX */
#if 0
	register struct rlimit *rlim;
#endif
	u_int sched_nest;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#ifdef INVARIANTS
	if (p->p_stat != SMTX && p->p_stat != SRUN)
		mtx_assert(&Giant, MA_NOTOWNED);
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	binuptime(&new_switchtime);
	bintime_add(&p->p_runtime, &new_switchtime);
	bintime_sub(&p->p_runtime, PCPU_PTR(switchtime));
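
	/*
	 * (Illustrative note: if this CPU's switchtime was t0 and
	 * binuptime() just read t1, the add/subtract pair above is
	 * simply p_runtime += t1 - t0, charging the outgoing process
	 * for exactly the interval it spent on the CPU.)
	 */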

#ifdef DDB
	/*
	 * Don't perform context switches from the debugger.
	 */
	if (db_active) {
		mtx_unlock_spin(&sched_lock);
		db_error("Context switches not allowed in the debugger.");
	}
#endif

#if 0
	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 *
	 * XXX drop sched_lock, pickup Giant
	 */
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    p->p_runtime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			killproc(p, "exceeded maximum CPU limit");
			mtx_lock_spin(&sched_lock);
			PROC_UNLOCK(p);
		} else {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			psignal(p, SIGXCPU);
			mtx_lock_spin(&sched_lock);
			PROC_UNLOCK(p);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}
#endif

	/*
	 * Pick a new current process and record its start time.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	CTR3(KTR_PROC, "mi_switch: old proc %p (pid %d, %s)", p, p->p_pid,
	    p->p_comm);
	sched_nest = sched_lock.mtx_recurse;
	td->td_lastcpu = td->td_kse->ke_oncpu;
	td->td_kse->ke_oncpu = NOCPU;
	td->td_kse->ke_flags &= ~KEF_NEEDRESCHED;
	cpu_switch();
	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)td;
	CTR3(KTR_PROC, "mi_switch: new proc %p (pid %d, %s)", p, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_lock_spin(&sched_lock);
	switch (p->p_stat) {
	case SZOMB: /* not a thread flag XXXKSE */
		panic("setrunnable(1)");
	}
	switch (td->td_proc->p_stat) {
	case 0:
	case SRUN:
	case SWAIT:
	default:
		panic("setrunnable(2)");
	case SSTOP:
	case SSLEEP:			/* e.g. when sending signals */
		if (td->td_flags & TDF_CVWAITQ)
			cv_waitq_remove(td);
		else
			unsleep(td);
		break;

	case SIDL:
		break;
	}
	td->td_proc->p_stat = SRUN;
	if (td->td_ksegrp->kg_slptime > 1)
		updatepri(td);
	td->td_ksegrp->kg_slptime = 0;
	td->td_kse->ke_slptime = 0;
	if ((p->p_sflag & PS_INMEM) == 0) {
		p->p_sflag |= PS_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	} else {
		setrunqueue(td);
		maybe_resched(td);
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(register struct ksegrp *kg)
{
	register unsigned int newpriority;
	struct thread *td;

	mtx_lock_spin(&sched_lock);
	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);
	}
	mtx_unlock_spin(&sched_lock);
}
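
/*
 * Worked example (illustrative; the constants are assumptions, check
 * the priority/param headers for the real values): taking PUSER == 160,
 * INVERSE_ESTCPU_WEIGHT == 8, NICE_WEIGHT == 1 and PRIO_MIN == -20, a
 * nice-0 ksegrp with kg_estcpu == 80 gets
 *	newpriority = 160 + 80 / 8 + 1 * (0 - (-20)) == 190,
 * which is then clamped to [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].
 * More accumulated CPU or a higher nice value yields a numerically
 * larger, i.e. weaker, priority.
 */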

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	struct ksegrp *kg;

	avg = &averunnable;
	sx_slock(&allproc_lock);
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			switch (p->p_stat) {
			case SRUN:
				if ((p->p_flag & P_NOLOAD) != 0)
					goto nextproc;
				/* FALLTHROUGH */
			case SIDL:
				nrun++;
			}
nextproc:
			continue;
		}
	}
	sx_sunlock(&allproc_lock);
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{

	callout_init(&schedcpu_callout, 1);
	callout_init(&roundrobin_callout, 0);
	callout_init(&loadav_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
schedclock(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;

	/* Check the thread pointer before dereferencing it. */
	if (td == NULL)
		panic("schedclock");
	ke = td->td_kse;
	kg = td->td_ksegrp;
	ke->ke_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}

/*
 * General purpose yield system call
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	td->td_priority = PRI_MAX_TIMESHARE;
	setrunqueue(td);
	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;

	return (0);
}