xref: /freebsd/sys/kern/kern_synch.c (revision 77b7cdf1999ee965ad494fddd184b18f532ac91a)
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"
#ifdef __i386__
#include "opt_swtch.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#ifdef SWTCH_OPTIM_STATS
#include <machine/md_var.h>
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;

static struct callout loadav_callout;
static struct callout lbolt_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
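
/*
 * A worked check of these constants (illustrative, not from the original
 * file).  With a 5 second sample period, an N-minute average decays by
 * exp(-5 / (60 * N)) per sample: exp(-5/60) = exp(-1/12) for 1 minute,
 * exp(-5/300) = exp(-1/60) for 5 minutes, and exp(-5/900) = exp(-1/180)
 * for 15 minutes.  loadav() below applies the standard exponential
 * moving average
 *
 *	avg = avg * cexp + nrun * (1 - cexp)
 *
 * in fixed point, with FSCALE representing 1.0.
 */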

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	endtsleep(void *);
static void	loadav(void *arg);
static void	lboltcb(void *arg);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
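
/*
 * Illustrative example (not in the original source): for a wait channel
 * address such as (void *)0xc1234567,
 *
 *	LOOKUP(0xc1234567) = (0xc1234567 >> 8) & 127
 *	                   = 0xc12345 & 0x7f = 0x45 = 69
 *
 * so a thread sleeping on that channel hangs off slpque[69].  Dropping
 * the low 8 bits keeps identically aligned kernel addresses from all
 * hashing into the same bucket.
 */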

void
sleepinit(void)
{
	int i;

	hogticks = (hz / 10) * 2;	/* Default only. */
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If priority includes the PCATCH flag, signals
 * are checked before and after sleeping; otherwise signals are not
 * checked.  Returns 0 if awakened and EWOULDBLOCK if the timeout
 * expires.  If PCATCH is set and a signal needs to be delivered,
 * ERESTART is returned if the current system call should be restarted
 * if possible, and EINTR is returned if the system call should be
 * interrupted by the signal.
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered again before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */

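/*
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * the queue structure, its lock, and the helper queue_empty() are
 * assumptions for illustration):
 *
 *	struct mtx qlock;		// protects the queue below
 *	struct myqueue q;		// hypothetical shared structure
 *	int error;
 *
 *	mtx_lock(&qlock);
 *	while (queue_empty(&q)) {
 *		// Drops qlock while asleep, reacquires it on wakeup.
 *		error = msleep(&q, &qlock, PVM | PCATCH, "qwait", hz);
 *		if (error == EWOULDBLOCK)
 *			break;		// timed out after ~1 second
 *	}
 *	mtx_unlock(&qlock);
 *
 * The producer side would call wakeup(&q) (or wakeup_one(&q)) after
 * making the queue non-empty.
 */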
int
msleep(void *ident, struct mtx *mtx, int priority, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int sig, catch = priority & PCATCH;
	int rval = 0;
	WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mtx->mtx_object,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	/*
	 * If we are capable of async syscalls and there isn't already
	 * another one ready to return, start a new thread
	 * and queue it as ready to run.  Note that there is danger here
	 * because we need to make sure that we don't sleep allocating
	 * the thread (recursion here might be bad).
	 */
	mtx_lock_spin(&sched_lock);
	if (p->p_flag & P_THREADED || p->p_numthreads > 1) {
		/*
		 * Just don't bother if we are exiting and not the
		 * exiting thread, or if the thread was marked as
		 * interrupted.
		 */
		if (catch &&
		    (((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) ||
		     (td->td_flags & TDF_INTERRUPT))) {
			td->td_flags &= ~TDF_INTERRUPT;
			mtx_unlock_spin(&sched_lock);
			return (EINTR);
		}
	}
	if (cold) {
		/*
		 * During autoconfiguration, just give interrupts
		 * a chance, then just return.
		 * Don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		if (mtx != NULL && (priority & PDROP))
			mtx_unlock(mtx);
		mtx_unlock_spin(&sched_lock);
		return (0);
	}

	DROP_GIANT();

	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
		if (priority & PDROP)
			mtx = NULL;
	}

	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
	    td, p->p_pid, p->p_comm, wmesg, ident);

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
	TD_SET_ON_SLEEPQ(td);
	if (timo)
		callout_reset(&td->td_slpcallout, timo, endtsleep, td);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us; thus we must be ready for sleep
	 * when cursig is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from cursig.
	 */
	if (catch) {
		CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
		    p->p_pid, p->p_comm);
		td->td_flags |= TDF_SINTR;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		sig = cursig(td);
		if (sig == 0 && thread_suspend_check(1))
			sig = SIGSTOP;
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (sig != 0) {
			if (TD_ON_SLEEPQ(td))
				unsleep(td);
		} else if (!TD_ON_SLEEPQ(td))
			catch = 0;
	} else
		sig = 0;

	/*
	 * Let the scheduler know we're about to voluntarily go to sleep.
	 */
	sched_sleep(td, priority & PRIMASK);

	if (TD_ON_SLEEPQ(td)) {
		p->p_stats->p_ru.ru_nvcsw++;
		TD_SET_SLEEPING(td);
		mi_switch();
	}
	/*
	 * We're awake from voluntary sleep.
	 */
	CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	td->td_flags &= ~TDF_SINTR;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL) {
		td->td_flags &= ~TDF_TIMOFAIL;
	} else if (timo && callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * This isn't supposed to be pretty.  If we are here, then
		 * the endtsleep() callout is currently executing on another
		 * CPU and is either spinning on the sched_lock or will be
		 * soon.  If we don't synchronize here, there is a chance
		 * that this process may msleep() again before the callout
		 * has a chance to run and the callout may end up waking up
		 * the wrong msleep().  Yuck.
		 */
		TD_SET_SLEEPING(td);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		td->td_flags &= ~TDF_TIMOFAIL;
	}
	if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
	    (rval == 0)) {
		td->td_flags &= ~TDF_INTERRUPT;
		rval = EINTR;
	}
	mtx_unlock_spin(&sched_lock);

	if (rval == 0 && catch) {
		PROC_LOCK(p);
		/* XXX: shouldn't we always be calling cursig()? */
		if (sig != 0 || (sig = cursig(td))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				rval = EINTR;
			else
				rval = ERESTART;
		}
		PROC_UNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (mtx != NULL) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}

/*
 * Implement timeout for msleep().
 *
 * If the process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If the proc
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(void *arg)
{
	struct thread *td = arg;

	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	/*
	 * This is the other half of the synchronization with msleep()
	 * described above.  If the TDS_TIMEOUT flag is set, we lost the
	 * race and just need to put the process back on the runqueue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
		td->td_flags |= TDF_TIMEOUT;
		td->td_wmesg = NULL;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	TD_CLR_SLEEPING(td);
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}
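
/*
 * An illustrative timeline of the msleep()/endtsleep() race handled
 * above (a reconstruction, not from the original source).  Suppose a
 * timed sleep expires just as a wakeup() arrives:
 *
 *	CPU0 (sleeper)				CPU1 (callout)
 *	wakeup() makes td runnable
 *	msleep() returns; callout_stop()	endtsleep() already running,
 *	    returns 0				    spinning on sched_lock
 *	msleep() again on another channel	endtsleep() runs late
 *
 * Without a handshake, the late endtsleep() on CPU1 could wake the
 * second, unrelated sleep.  msleep() therefore parks in an extra
 * mi_switch() until the stale callout has set TDF_TIMOFAIL and made
 * the thread runnable again, then clears the flag and proceeds.
 */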

/*
 * Abort a thread, as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 * This is nearly identical to cv_abort().
 * Think about merging them?
 * Also, whatever the signal code does...
 */
void
abortsleep(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
		if (TD_ON_SLEEPQ(td)) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
		}
	}
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
		td->td_wmesg = NULL;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			goto restart;
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	struct slpquehead *qp;
	struct thread *td;
	struct proc *p;
	struct thread *ntd;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC, "wakeup1: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			break;
		}
	}
	mtx_unlock_spin(&sched_lock);
}
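
/*
 * Illustrative contrast between the two flavours (hypothetical wait
 * channel and helpers, not from this file): with several threads
 * blocked in msleep(&q, ...), wakeup(&q) makes all of them runnable
 * (safe, but a possible thundering herd), while wakeup_one(&q) wakes
 * only the first match on the hash chain.  A woken waiter that cannot
 * consume the event must then pass the wakeup along, or it is lost:
 *
 *	mtx_lock(&qlock);
 *	if (!queue_empty(&q) && !can_consume(&q))
 *		wakeup_one(&q);		// hand off to another waiter
 *	mtx_unlock(&qlock);
 */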

/*
 * The machine independent parts of mi_switch().
 */
void
mi_switch(void)
{
	struct bintime new_switchtime;
	struct thread *td;
#if defined(__i386__) || defined(__sparc64__) || defined(__amd64__)
	struct thread *newtd;
#endif
	struct proc *p;
	u_int sched_nest;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	td = curthread;			/* XXX */
	p = td->td_proc;		/* XXX */
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) &&
	    !TD_ON_RUNQ(td) &&
	    !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1,
	    ("mi_switch: switch in a critical section"));

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	binuptime(&new_switchtime);
	bintime_add(&p->p_runtime, &new_switchtime);
	bintime_sub(&p->p_runtime, PCPU_PTR(switchtime));

#ifdef DDB
	/*
	 * Don't perform context switches from the debugger.
	 */
	if (db_active) {
		mtx_unlock_spin(&sched_lock);
		db_print_backtrace();
		db_error("Context switches not allowed in the debugger.");
	}
#endif

	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * over max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit != RLIM_INFINITY &&
	    p->p_runtime.sec > p->p_cpulimit) {
		p->p_sflag |= PS_XCPU;
		td->td_flags |= TDF_ASTPENDING;
	}

	/*
	 * Finish up stats for outgoing thread.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	sched_nest = sched_lock.mtx_recurse;
	if (td->td_proc->p_flag & P_THREADED)
		thread_switchout(td);
	sched_switchout(td);

#if defined(__i386__) || defined(__sparc64__) || defined(__amd64__)
	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);	/* SHAZAM!! */
#ifdef SWTCH_OPTIM_STATS
	else
		stupid_switch++;
#endif
#else
	cpu_switch();		/* SHAZAM!! */
#endif

	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)td;
	sched_switchin(td);

	/*
	 * Start setting up stats etc. for the incoming thread.
	 * Similar code in fork_exit() is returned to by cpu_switch()
	 * in the case of a new thread/process.
	 */
	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/*
	 * Call the switchin function while still holding the scheduler lock
	 * (used by the idlezero code and the general page-zeroing code).
	 */
	if (td->td_switchin)
		td->td_switchin();

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	switch (p->p_state) {
	case PRS_ZOMBIE:
		panic("setrunnable(1)");
	default:
		break;
	}
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return;
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process.  Otherwise just
		 * return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return;
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((p->p_sflag & PS_INMEM) == 0) {
		if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
			p->p_sflag |= PS_SWAPINREQ;
			wakeup(&proc0);
		}
	} else
		sched_wakeup(td);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely bogus: only works with 1:1 (but compiles ok now :-)
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	struct thread *td;

	avg = &averunnable;
	sx_slock(&allproc_lock);
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		/* Processes marked P_NOLOAD never contribute. */
		if ((p->p_flag & P_NOLOAD) != 0)
			continue;
		FOREACH_THREAD_IN_PROC(p, td) {
			if (TD_ON_RUNQ(td) || TD_IS_RUNNING(td))
				nrun++; /* XXXKSE */
		}
	}
	sx_sunlock(&allproc_lock);
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
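
/*
 * A worked example of the fixed-point update above (illustrative, not
 * from the original source).  With FSHIFT = 11, FSCALE = 2048.  Suppose
 * the 1 minute average is currently 0.50 (ldavg[0] = 1024) and nrun = 2:
 *
 *	cexp[0]         = 0.92004 * 2048 ~= 1884
 *	decayed term    = 1884 * 1024              = 1929216
 *	new-sample term = 2 * 2048 * (2048 - 1884) =  671744
 *	ldavg[0]        = (1929216 + 671744) >> 11 = 1270 ~= 0.62
 *
 * The average moves from 0.50 toward 2.0 by a factor of
 * (1 - exp(-1/12)) ~= 8% per 5 second sample.
 */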

static void
lboltcb(void *arg)
{
	wakeup(&lbolt);
	callout_reset(&lbolt_callout, hz, lboltcb, NULL);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout, 0);
	callout_init(&lbolt_callout, 1);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
	lboltcb(NULL);
}

/*
 * General purpose yield system call.
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
	sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;

	return (0);
}