xref: /freebsd/sys/kern/kern_synch.c (revision 6adf353a56a161443406b44a45d00c688ca7b857)
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup __P((void *dummy));
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */

static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

static void	endtsleep __P((void *));
static void	roundrobin __P((void *arg));
static void	schedcpu __P((void *arg));

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
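
/*
 * Usage sketch (illustrative, not part of the kernel proper): the sysctl
 * is expressed in microseconds but stored in ticks, so written values are
 * rounded down to a multiple of `tick'.  Assuming hz == 100 (tick ==
 * 10000us):
 *
 *	$ sysctl kern.quantum			# reads 100000 (10 ticks)
 *	$ sysctl -w kern.quantum=25000		# stored as 25000/10000 == 2 ticks
 *
 * The handler above keeps hogticks at twice the quantum.
 */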

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
void
maybe_resched(p)
	struct proc *p;
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (p->p_pri.pri_level < curproc->p_pri.pri_level)
		need_resched(curproc);
}

int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again, which is equivalent to a preemption; thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(arg)
	void *arg;
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given (b/(b+1)) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ -2.30 / (5 * loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
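
/*
 * Worked check of the above (illustrative): for loadav = 2, b = 4 and
 * decay = 4/5 = 0.8.  The table gives power = 10.32 for this load, and
 * indeed 0.8 ** 10.32 =~ 0.1: about 90% of p_estcpu is forgotten in
 * roughly 5 * loadav = 10 seconds.
 */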

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
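
/*
 * Fixed point example (illustrative): ldavg is scaled by FSCALE, so a
 * load average of 1.0 makes loadfactor() return 2 * FSCALE, and
 *	decay_cpu(2 * FSCALE, cpu) == (2 * FSCALE * cpu) / (3 * FSCALE)
 *				   == 2 * cpu / 3
 * which is exactly decay = b / (b + 1) with b = 2.
 */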

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
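
/*
 * Worked example (illustrative; assumes FSHIFT == 11, i.e. FSCALE == 2048):
 * ccpu == 0.95122... * 2048 =~ 1948, so each second schedcpu() scales
 * p_pctcpu by 1948/2048.  After 60 seconds an idle process retains
 * exp(-1/20) ** 60 == exp(-3) =~ 0.05 of its old percentage; 95% has
 * decayed, as advertised above.
 */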

/*
 * Recompute process priorities every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		mtx_lock_spin(&sched_lock);
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}

		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (realstathz == 100) ?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / realstathz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
		resetpriority(p);
		if (p->p_pri.pri_level >= PUSER) {
			if (p->p_oncpu == NOCPU &&	/* idle */
			    p->p_stat == SRUN &&
			    (p->p_sflag & PS_INMEM) &&
			    (p->p_pri.pri_level / RQ_PPQ) !=
			    (p->p_pri.pri_user / RQ_PPQ)) {
				remrunqueue(p);
				p->p_pri.pri_level = p->p_pri.pri_user;
				setrunqueue(p);
			} else
				p->p_pri.pri_level = p->p_pri.pri_user;
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);
	vmmeter();
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}
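
/*
 * Worked example (illustrative): with a load average of 1.0, each decay
 * step multiplies newcpu by 2/3 (see the decay_cpu() example above).  A
 * process with p_estcpu == 243 that sleeps 6 seconds has the decay applied
 * 5 times in all (once by schedcpu() during the first second of sleep,
 * four times by the loop above) and wakes with 243 * (2/3)**5 == 32.
 */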

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
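
/*
 * Example (illustrative): for a wait channel address such as 0xc0ef3a40,
 * LOOKUP() drops the low 8 bits and keeps the next 7:
 *	(0xc0ef3a40 >> 8) & 127 == 0xc0ef3a & 0x7f == 0x3a
 * so that channel hashes to slpque[58].
 */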

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
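
/*
 * Example (illustrative, assuming hz == 100): sched_quantum starts out
 * as 100/10 == 10 ticks (the 100ms mentioned by roundrobin() above), and
 * hogticks as 20 ticks, i.e. a process is considered a hog once it has
 * run for two full quanta.
 */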

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */
int
msleep(ident, mtx, priority, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	int priority, timo;
	const char *wmesg;
{
	struct proc *p = curproc;
	int sig, catch = priority & PCATCH;
	int rval = 0;
	WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
	if (p && KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	WITNESS_SLEEP(0, &mtx->mtx_object);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	mtx_lock_spin(&sched_lock);
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		if (mtx != NULL && priority & PDROP)
			mtx_unlock_flags(mtx, MTX_NOSWITCH);
		mtx_unlock_spin(&sched_lock);
		return (0);
	}

	DROP_GIANT_NOSWITCH();

	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock_flags(mtx, MTX_NOSWITCH);
		if (priority & PDROP)
			mtx = NULL;
	}

	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && p->p_stat == SRUN, ("msleep"));
	/*
	 * The process may be sitting on a slpque if asleep() was called;
	 * remove it before re-adding.
	 */
	if (p->p_wchan != NULL)
		unsleep(p);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_pri.pri_level = priority & PRIMASK;
	CTR5(KTR_PROC, "msleep: proc %p (pid %d, %s) on %s (%p)", p, p->p_pid,
	    p->p_comm, wmesg, ident);
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
	if (timo)
		callout_reset(&p->p_slpcallout, timo, endtsleep, p);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		CTR3(KTR_PROC, "msleep caught: proc %p (pid %d, %s)", p,
		    p->p_pid, p->p_comm);
		p->p_sflag |= PS_SINTR;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		sig = CURSIG(p);
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK_NOSWITCH(p);
		if (sig != 0) {
			if (p->p_wchan)
				unsleep(p);
		} else if (p->p_wchan == NULL)
			catch = 0;
	} else
		sig = 0;
	if (p->p_wchan != NULL) {
		p->p_stat = SSLEEP;
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
	}
	CTR3(KTR_PROC, "msleep resume: proc %p (pid %d, %s)", p, p->p_pid,
	    p->p_comm);
	KASSERT(p->p_stat == SRUN, ("running but not SRUN"));
	p->p_sflag &= ~PS_SINTR;
	if (p->p_sflag & PS_TIMEOUT) {
		p->p_sflag &= ~PS_TIMEOUT;
		if (sig == 0)
			rval = EWOULDBLOCK;
	} else if (timo)
		callout_stop(&p->p_slpcallout);
	mtx_unlock_spin(&sched_lock);

	if (rval == 0 && catch) {
		PROC_LOCK(p);
		/* XXX: shouldn't we always be calling CURSIG()? */
		if (sig != 0 || (sig = CURSIG(p))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				rval = EINTR;
			else
				rval = ERESTART;
		}
		PROC_UNLOCK(p);
	}
	PICKUP_GIANT();
#ifdef KTRACE
	mtx_lock(&Giant);
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
	mtx_unlock(&Giant);
#endif
	if (mtx != NULL) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}
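
/*
 * Typical usage sketch (illustrative only; `sc' and its members are
 * hypothetical).  The mutex protects the predicate, and msleep() drops
 * and reacquires it atomically around the sleep, so a wakeup between the
 * test and the sleep cannot be lost.
 */
#if 0
	mtx_lock(&sc->sc_mtx);
	while (sc->sc_ready == 0) {
		/* Wait at most one second per iteration. */
		error = msleep(&sc->sc_ready, &sc->sc_mtx, PZERO | PCATCH,
		    "scrdy", hz);
		if (error != 0 && error != EWOULDBLOCK)
			break;		/* interrupted: EINTR or ERESTART */
	}
	mtx_unlock(&sc->sc_mtx);
#endif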

/*
 * asleep() - async sleep call.  Place process on wait queue and return
 * immediately without blocking.  The process stays runnable until await()
 * is called.  If ident is NULL, the process is removed from its wait queue
 * if it is still on one.
 *
 * Only the most recent sleep condition is effective when making successive
 * calls to asleep() or when calling msleep().
 *
 * The timeout, if any, is not initiated until await() is called.  The sleep
 * priority, signal, and timeout are specified in the asleep() call but may
 * be overridden in the await() call.
 *
 * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
 */

int
asleep(void *ident, int priority, const char *wmesg, int timo)
{
	struct proc *p = curproc;

	/*
	 * Remove preexisting wait condition (if any) and place process
	 * on appropriate slpque, but do not put process to sleep.
	 */

	mtx_lock_spin(&sched_lock);

	if (p->p_wchan != NULL)
		unsleep(p);

	if (ident) {
		p->p_wchan = ident;
		p->p_wmesg = wmesg;
		p->p_slptime = 0;
		p->p_asleep.as_priority = priority;
		p->p_asleep.as_timo = timo;
		TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
	}

	mtx_unlock_spin(&sched_lock);

	return (0);
}

/*
 * await() - wait for async condition to occur.  The process blocks until
 * wakeup() is called on the most recent asleep() address.  If wakeup is
 * called prior to await(), await() winds up being a NOP.
 *
 * If await() is called more than once (without an intervening asleep()
 * call), await() is still effectively a NOP but it calls mi_switch() to
 * give other processes some cpu before returning.  The process is left
 * runnable.
 *
 * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
 */

int
await(int priority, int timo)
{
	struct proc *p = curproc;
	int rval = 0;

	WITNESS_SLEEP(0, NULL);
	mtx_lock_spin(&sched_lock);
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		mtx_unlock_spin(&sched_lock);
		return (0);
	}
	DROP_GIANT_NOSWITCH();

	if (p->p_wchan != NULL) {
		int sig;
		int catch;

#ifdef KTRACE
		if (p && KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 1, 0);
#endif
		/*
		 * The call to await() can override defaults specified in
		 * the original asleep().
		 */
		if (priority < 0)
			priority = p->p_asleep.as_priority;
		if (timo < 0)
			timo = p->p_asleep.as_timo;

		/*
		 * Install timeout
		 */

		if (timo)
			callout_reset(&p->p_slpcallout, timo, endtsleep, p);

		sig = 0;
		catch = priority & PCATCH;

		if (catch) {
			p->p_sflag |= PS_SINTR;
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			sig = CURSIG(p);
			mtx_lock_spin(&sched_lock);
			PROC_UNLOCK_NOSWITCH(p);
			if (sig != 0) {
				if (p->p_wchan)
					unsleep(p);
			} else if (p->p_wchan == NULL)
				catch = 0;
		}
		if (p->p_wchan != NULL) {
			p->p_stat = SSLEEP;
			p->p_stats->p_ru.ru_nvcsw++;
			mi_switch();
		}
		KASSERT(p->p_stat == SRUN, ("running but not SRUN"));
		p->p_sflag &= ~PS_SINTR;
		if (p->p_sflag & PS_TIMEOUT) {
			p->p_sflag &= ~PS_TIMEOUT;
			if (sig == 0)
				rval = EWOULDBLOCK;
		} else if (timo)
			callout_stop(&p->p_slpcallout);
		mtx_unlock_spin(&sched_lock);
		if (rval == 0 && catch) {
			PROC_LOCK(p);
			if (sig != 0 || (sig = CURSIG(p))) {
				if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
					rval = EINTR;
				else
					rval = ERESTART;
			}
			PROC_UNLOCK(p);
		}
#ifdef KTRACE
		mtx_lock(&Giant);
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
		mtx_unlock(&Giant);
#endif
	} else {
		/*
		 * If as_priority is 0, await() has been called without an
		 * intervening asleep().  We are still effectively a NOP,
		 * but we call mi_switch() for safety.
		 */

		if (p->p_asleep.as_priority == 0) {
			p->p_stats->p_ru.ru_nvcsw++;
			mi_switch();
		}
		mtx_unlock_spin(&sched_lock);
	}

	/*
	 * Clear p_asleep.as_priority as an indication that await() has been
	 * called.  If await() is called again without an intervening
	 * asleep(), await() is still effectively a NOP but the above
	 * mi_switch() code is triggered as a safety.
	 */
	if (rval == 0)
		p->p_asleep.as_priority = 0;

	PICKUP_GIANT();
	return (rval);
}
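
/*
 * Usage sketch for the pair above (illustrative only, and as experimental
 * as the functions themselves; start_io() and `sc' are hypothetical).
 * The wait condition is registered before the work that will eventually
 * issue the wakeup() is started, so a wakeup arriving between asleep()
 * and await() is not lost: await() simply returns immediately.
 */
#if 0
	asleep(&sc->sc_done, PZERO, "scdone", hz);
	start_io(sc);			/* may call wakeup(&sc->sc_done) */
	error = await(-1, -1);		/* -1, -1: keep asleep()'s values */
#endif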

/*
 * Implement the timeout for msleep() or asleep()/await().
 *
 * If the process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct proc *p;

	p = (struct proc *)arg;
	CTR3(KTR_PROC, "endtsleep: proc %p (pid %d, %s)", p, p->p_pid,
	    p->p_comm);
	mtx_lock_spin(&sched_lock);
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_sflag |= PS_TIMEOUT;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(p)
	register struct proc *p;
{

	mtx_lock_spin(&sched_lock);
	if (p->p_wchan) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_slpq);
		p->p_wchan = NULL;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	TAILQ_FOREACH(p, qp, p_slpq) {
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_slpq);
			p->p_wchan = NULL;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				CTR3(KTR_PROC, "wakeup: proc %p (pid %d, %s)",
				    p, p->p_pid, p->p_comm);
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_sflag & PS_INMEM) {
					setrunqueue(p);
					maybe_resched(p);
				} else {
					p->p_sflag |= PS_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];

	TAILQ_FOREACH(p, qp, p_slpq) {
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_slpq);
			p->p_wchan = NULL;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				CTR3(KTR_PROC, "wakeup1: proc %p (pid %d, %s)",
				    p, p->p_pid, p->p_comm);
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_sflag & PS_INMEM) {
					setrunqueue(p);
					maybe_resched(p);
					break;
				} else {
					p->p_sflag |= PS_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			}
		}
	}
	mtx_unlock_spin(&sched_lock);
}
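
/*
 * Producer side of the msleep() sketch earlier (illustrative only; `sc'
 * is again hypothetical).  The predicate is updated under the same mutex
 * the sleeper holds, so the sleeper cannot test the flag and then block
 * after it has been set.  wakeup_one() would do if at most one sleeper
 * can consume the event.
 */
#if 0
	mtx_lock(&sc->sc_mtx);
	sc->sc_ready = 1;
	wakeup(&sc->sc_ready);
	mtx_unlock(&sc->sc_mtx);
#endif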

/*
 * The machine independent parts of mi_switch().
 */
void
mi_switch()
{
	struct timeval new_switchtime;
	register struct proc *p = curproc;	/* XXX */
#if 0
	register struct rlimit *rlim;
#endif
	critical_t sched_crit;
	u_int sched_nest;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microuptime(&new_switchtime);
	if (timevalcmp(&new_switchtime, PCPU_PTR(switchtime), <)) {
#if 0
		/* XXX: This doesn't play well with sched_lock right now. */
		printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
		    PCPU_GET(switchtime.tv_sec), PCPU_GET(switchtime.tv_usec),
		    new_switchtime.tv_sec, new_switchtime.tv_usec);
#endif
		new_switchtime = PCPU_GET(switchtime);
	} else {
		p->p_runtime += (new_switchtime.tv_usec - PCPU_GET(switchtime.tv_usec)) +
		    (new_switchtime.tv_sec - PCPU_GET(switchtime.tv_sec)) *
		    (int64_t)1000000;
	}

#if 0
	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 *
	 * XXX drop sched_lock, pickup Giant
	 */
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    p->p_runtime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			killproc(p, "exceeded maximum CPU limit");
			mtx_lock_spin(&sched_lock);
			PROC_UNLOCK_NOSWITCH(p);
		} else {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			psignal(p, SIGXCPU);
			mtx_lock_spin(&sched_lock);
			PROC_UNLOCK_NOSWITCH(p);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}
#endif

	/*
	 * Pick a new current process and record its start time.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	CTR3(KTR_PROC, "mi_switch: old proc %p (pid %d, %s)", p, p->p_pid,
	    p->p_comm);
	sched_crit = sched_lock.mtx_savecrit;
	sched_nest = sched_lock.mtx_recurse;
	p->p_lastcpu = p->p_oncpu;
	p->p_oncpu = NOCPU;
	clear_resched(p);
	cpu_switch();
	p->p_oncpu = PCPU_GET(cpuid);
	sched_lock.mtx_savecrit = sched_crit;
	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)p;
	CTR3(KTR_PROC, "mi_switch: new proc %p (pid %d, %s)", p, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.tv_sec) == 0)
		microuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
	register struct proc *p;
{

	mtx_lock_spin(&sched_lock);
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	case SWAIT:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:			/* e.g. when sending signals */
		if (p->p_sflag & PS_CVWAITQ)
			cv_waitq_remove(p);
		else
			unsleep(p);
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_sflag & PS_INMEM) == 0) {
		p->p_sflag |= PS_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	} else {
		setrunqueue(p);
		maybe_resched(p);
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
	register struct proc *p;
{
	register unsigned int newpriority;

	mtx_lock_spin(&sched_lock);
	if (p->p_pri.pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (p->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		p->p_pri.pri_user = newpriority;
	}
	maybe_resched(p);
	mtx_unlock_spin(&sched_lock);
}

/* ARGSUSED */
static void
sched_setup(dummy)
	void *dummy;
{

	callout_init(&schedcpu_callout, 1);
	callout_init(&roundrobin_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
schedclock(p)
	struct proc *p;
{

	p->p_cpticks++;
	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	if ((p->p_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(p);
		if (p->p_pri.pri_level >= PUSER)
			p->p_pri.pri_level = p->p_pri.pri_user;
	}
}

/*
 * General purpose yield system call.
 */
int
yield(struct proc *p, struct yield_args *uap)
{

	p->p_retval[0] = 0;

	mtx_lock_spin(&sched_lock);
	DROP_GIANT_NOSWITCH();
	p->p_pri.pri_level = PRI_MAX_TIMESHARE;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	PICKUP_GIANT();

	return (0);
}