xref: /freebsd/sys/kern/kern_synch.c (revision ae83180158c4c937f170e31eff311b18c0286a93)
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */

static struct callout loadav_callout;
static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
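
/*
 * Derivation of the constants above (a sketch): loadav() runs roughly
 * every 5 seconds, so an n-minute average should decay by
 * exp(-5 / (60 * n)) per sample: exp(-5/60) == exp(-1/12),
 * exp(-5/300) == exp(-1/60) and exp(-5/900) == exp(-1/180).  With the
 * update performed in loadav(),
 *	avg = (cexp * avg + nrun * FSCALE * (FSCALE - cexp)) >> FSHIFT,
 * a constant nrun makes avg converge to nrun * FSCALE, i.e. to the
 * fixed point representation of nrun itself.
 */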

static void	endtsleep(void *);
static void	loadav(void *arg);
static void	roundrobin(void *arg);
static void	schedcpu(void *arg);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I",
	"Roundrobin scheduling quantum in microseconds");
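
/*
 * Example (illustrative only): with the traditional default of hz == 100,
 * tick is 10000 microseconds, so "sysctl kern.quantum=100000" would set
 * sched_quantum to 10 ticks (100 ms) and hogticks to 20 ticks.
 */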

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

int
roundrobin_interval(void)
{
	return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(arg)
	void *arg;
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
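
/*
 * Numerical check of the table above: for loadav == 1, b == 2 and
 * decay == 2/3, so power == ln(.1) / ln(2/3) == 5.68; for loadav == 2,
 * decay == 4/5 and power == ln(.1) / ln(4/5) == 10.32, matching the
 * stated values.
 */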

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
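
/*
 * Worked example (a sketch, assuming the stock FSHIFT of 11, so
 * FSCALE == 2048): ccpu == 0.95122 * 2048 == 1948 and
 * FSCALE - ccpu == 100.  The general formula's per-second increment,
 *	((FSCALE - ccpu) * (cpticks * FSCALE / stathz)) >> FSHIFT,
 * is then 100 * cpticks / stathz, which for stathz == 100 is just
 * cpticks, exactly what the fast formula computes as
 * cpticks << (FSHIFT - CCPU_SHIFT) == cpticks << 0.
 */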

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int realstathz;
	int awake;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		mtx_lock_spin(&sched_lock);
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment time in/out of memory and sleep
				 * time (if sleeping).  We ignore overflow;
				 * with 16-bit int's (remember them?)
				 * overflow takes 45 days.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if ((ke->ke_state == KES_ONRUNQ) ||
				    ((ke->ke_state == KES_THREAD) &&
				    (ke->ke_thread->td_state == TDS_RUNNING))) {
					ke->ke_slptime = 0;
					awake = 1;
				} else {
					/* XXXKSE
					 * This is probably a pointless
					 * statistic in a KSE world.
					 */
					ke->ke_slptime++;
				}

				/*
				 * pctcpu is only for ps?
				 * Do it per kse.. and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu
				    = (ke->ke_pctcpu * ccpu) >> FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_slptime > 1) {
					continue;
				}

#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100) ?
				    ((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_cpticks * FSCALE / realstathz)) >>
				    FSHIFT;
#endif
				ke->ke_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake == 0) {
				kg->kg_slptime++;
			} else {
				kg->kg_slptime = 0;
			}
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				int changedqueue;
				if (td->td_priority >= PUSER) {
					/*
					 * Only change the priority
					 * of threads that are still at their
					 * user priority.
					 * XXXKSE This is problematic
					 * as we may need to re-order
					 * the threads on the KSEG list.
					 */
					changedqueue =
					    ((td->td_priority / RQ_PPQ) !=
					     (kg->kg_user_pri / RQ_PPQ));

					td->td_priority = kg->kg_user_pri;
					if (changedqueue &&
					    td->td_state == TDS_RUNQ) {
						/* this could be optimised */
						remrunqueue(td);
						td->td_priority =
						    kg->kg_user_pri;
						setrunqueue(td);
					} else {
						td->td_priority = kg->kg_user_pri;
					}
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
	wakeup(&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(td)
	register struct thread *td;
{
	register struct ksegrp *kg;
	register unsigned int newcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (td == NULL)
		return;
	kg = td->td_ksegrp;
	newcpu = kg->kg_estcpu;
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		kg->kg_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(td->td_ksegrp);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
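
/*
 * Example (illustrative): a wait channel address of 0x1234ab00 hashes to
 * (0x1234ab00 >> 8) & (TABLESIZE - 1) == 0xab & 0x7f == 0x2b, slot 43.
 */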

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */
436 
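/*
 * Example usage (a sketch only; "foo_mtx", "foo_ready" and the wait
 * message are hypothetical):
 *
 *	mtx_lock(&foo_mtx);
 *	while (!foo_ready) {
 *		error = msleep(&foo_ready, &foo_mtx, PPAUSE | PCATCH,
 *		    "foordy", 0);
 *		if (error != 0)
 *			break;
 *	}
 *	mtx_unlock(&foo_mtx);
 *
 * The matching waker sets foo_ready and calls wakeup(&foo_ready), with
 * foo_mtx held so the test and the sleep do not race.
 */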
int
msleep(ident, mtx, priority, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	int priority, timo;
	const char *wmesg;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int sig, catch = priority & PCATCH;
	int rval = 0;
	WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_SLEEP(0, &mtx->mtx_object);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	/*
	 * If we are capable of async syscalls and there isn't already
	 * another one ready to return, start a new thread
	 * and queue it as ready to run. Note that there is danger here
	 * because we need to make sure that we don't sleep allocating
	 * the thread (recursion here might be bad).
	 * Hence the TDF_INMSLEEP flag.
	 */
	if (p->p_flag & P_KSES) {
		/*
		 * Just don't bother if we are exiting
		 * and not the exiting thread.
		 */
		if ((p->p_flag & P_WEXIT) && catch && p->p_singlethread != td)
			return (EINTR);
		if (td->td_mailbox && (!(td->td_flags & TDF_INMSLEEP))) {
			/*
			 * If we have no queued work to do, then
			 * upcall to the UTS to see if it has more to do.
			 * We don't need to upcall now, just make it and
			 * queue it.
			 */
			mtx_lock_spin(&sched_lock);
			if (TAILQ_FIRST(&td->td_ksegrp->kg_runq) == NULL) {
				/* Don't recurse here! */
				td->td_flags |= TDF_INMSLEEP;
				thread_schedule_upcall(td, td->td_kse);
				td->td_flags &= ~TDF_INMSLEEP;
			}
			mtx_unlock_spin(&sched_lock);
		}
	}
	mtx_lock_spin(&sched_lock);
	if (cold) {
		/*
		 * During autoconfiguration, just give interrupts
		 * a chance, then just return.
		 * Don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		if (mtx != NULL && priority & PDROP)
			mtx_unlock(mtx);
		mtx_unlock_spin(&sched_lock);
		return (0);
	}

	DROP_GIANT();

	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
		if (priority & PDROP)
			mtx = NULL;
	}

	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && td->td_state == TDS_RUNNING, ("msleep"));

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	td->td_kse->ke_slptime = 0;	/* XXXKSE */
	td->td_ksegrp->kg_slptime = 0;
	td->td_priority = priority & PRIMASK;
	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
	    td, p->p_pid, p->p_comm, wmesg, ident);
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
	if (timo)
		callout_reset(&td->td_slpcallout, timo, endtsleep, td);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there, and
	 * a wakeup or a SIGCONT (or both) could occur while we were stopped
	 * without resuming us; thus we must be ready for sleep
	 * when cursig is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from cursig.
	 */
	if (catch) {
		CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
		    p->p_pid, p->p_comm);
		td->td_flags |= TDF_SINTR;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		sig = cursig(td);
		if (sig == 0 && thread_suspend_check(1))
			sig = SIGSTOP;
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (sig != 0) {
			if (td->td_wchan != NULL)
				unsleep(td);
		} else if (td->td_wchan == NULL)
			catch = 0;
	} else
		sig = 0;
	if (td->td_wchan != NULL) {
		p->p_stats->p_ru.ru_nvcsw++;
		td->td_state = TDS_SLP;
		mi_switch();
	}
	CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(td->td_state == TDS_RUNNING, ("running but not TDS_RUNNING"));
	td->td_flags &= ~TDF_SINTR;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL) {
		td->td_flags &= ~TDF_TIMOFAIL;
	} else if (timo && callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * This isn't supposed to be pretty.  If we are here, then
		 * the endtsleep() callout is currently executing on another
		 * CPU and is either spinning on the sched_lock or will be
		 * soon.  If we don't synchronize here, there is a chance
		 * that this process may msleep() again before the callout
		 * has a chance to run and the callout may end up waking up
		 * the wrong msleep().  Yuck.
		 */
		td->td_flags |= TDF_TIMEOUT;
		td->td_state = TDS_SLP;
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
	}
	mtx_unlock_spin(&sched_lock);

	if (rval == 0 && catch) {
		PROC_LOCK(p);
		/* XXX: shouldn't we always be calling cursig()? */
		if (sig != 0 || (sig = cursig(td))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				rval = EINTR;
			else
				rval = ERESTART;
		}
		PROC_UNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (mtx != NULL) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}

/*
 * Implement timeout for msleep().
 *
 * If the process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If the process
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct thread *td = arg;

	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
	    td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	/*
	 * This is the other half of the synchronization with msleep()
	 * described above.  If the TDF_TIMEOUT flag is set, we lost the
	 * race and just need to put the process back on the runqueue.
	 */
	if ((td->td_flags & TDF_TIMEOUT) != 0) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (td->td_proc->p_sflag & PS_INMEM) {
			setrunqueue(td);
			maybe_resched(td);
		} else {
			td->td_state = TDS_SWAPPED;
			if ((td->td_proc->p_sflag & PS_SWAPPINGIN) == 0) {
				td->td_proc->p_sflag |= PS_SWAPINREQ;
				wakeup(&proc0);
			}
		}
	} else if (td->td_wchan != NULL) {
		if (td->td_state == TDS_SLP)  /* XXXKSE */
			setrunnable(td);
		else
			unsleep(td);
		td->td_flags |= TDF_TIMEOUT;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread, as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 * This is about identical to cv_abort().
 * Think about merging them?
 * Also, whatever the signal code does...
 */
void
abortsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
		if (td->td_wchan != NULL) {
			if (td->td_state == TDS_SLP) {  /* XXXKSE */
				setrunnable(td);
			} else {
				/*
				 * Probably in a suspended state;
				 * the exact state is unclear. XXXKSE
				 */
				unsleep(td);
			}
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Remove a thread from its wait queue.
 */
void
unsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	if (td->td_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		td->td_wchan = NULL;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	struct thread *ntd;
	struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		p = td->td_proc;
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_slpq);
			td->td_wchan = NULL;
			if (td->td_state == TDS_SLP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
				    td, p->p_pid, p->p_comm);
				if (td->td_ksegrp->kg_slptime > 1)
					updatepri(td);
				td->td_ksegrp->kg_slptime = 0;
				if (p->p_sflag & PS_INMEM) {
					setrunqueue(td);
					maybe_resched(td);
				} else {
					td->td_state = TDS_SWAPPED;
					if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
						p->p_sflag |= PS_SWAPINREQ;
						wakeup(&proc0);
					}
				}
				/* END INLINE EXPANSION */
			}
			goto restart;
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	register struct proc *p;
	struct thread *ntd;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		p = td->td_proc;
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_slpq);
			td->td_wchan = NULL;
			if (td->td_state == TDS_SLP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				CTR3(KTR_PROC, "wakeup1: thread %p (pid %d, %s)",
				    td, p->p_pid, p->p_comm);
				if (td->td_ksegrp->kg_slptime > 1)
					updatepri(td);
				td->td_ksegrp->kg_slptime = 0;
				if (p->p_sflag & PS_INMEM) {
					setrunqueue(td);
					maybe_resched(td);
					break;
				} else {
					td->td_state = TDS_SWAPPED;
					if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
						p->p_sflag |= PS_SWAPINREQ;
						wakeup(&proc0);
					}
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * The machine independent parts of mi_switch().
 */
void
mi_switch()
{
	struct bintime new_switchtime;
	struct thread *td = curthread;	/* XXX */
	struct proc *p = td->td_proc;	/* XXX */
	struct kse *ke = td->td_kse;
#if 0
	register struct rlimit *rlim;
#endif
	u_int sched_nest;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
	KASSERT((td->td_state != TDS_RUNQ), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (td->td_state != TDS_MTX &&
	    td->td_state != TDS_RUNQ &&
	    td->td_state != TDS_RUNNING)
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1,
	    ("mi_switch: switch in a critical section"));

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	binuptime(&new_switchtime);
	bintime_add(&p->p_runtime, &new_switchtime);
	bintime_sub(&p->p_runtime, PCPU_PTR(switchtime));

#ifdef DDB
	/*
	 * Don't perform context switches from the debugger.
	 */
	if (db_active) {
		mtx_unlock_spin(&sched_lock);
		db_error("Context switches not allowed in the debugger.");
	}
#endif

#if 0
	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 *
	 * XXX drop sched_lock, pickup Giant
	 */
	if (p->p_state != PRS_ZOMBIE &&
	    p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    p->p_runtime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			killproc(p, "exceeded maximum CPU limit");
			mtx_lock_spin(&sched_lock);
			PROC_UNLOCK(p);
		} else {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			psignal(p, SIGXCPU);
			mtx_lock_spin(&sched_lock);
			PROC_UNLOCK(p);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}
#endif

	/*
	 * Finish up stats for outgoing thread.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	sched_nest = sched_lock.mtx_recurse;
	td->td_lastcpu = ke->ke_oncpu;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags &= ~KEF_NEEDRESCHED;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or anything else similar.
	 */
	if (td->td_state == TDS_RUNNING) {
		KASSERT(((ke->ke_flags & KEF_IDLEKSE) == 0),
		    ("Idle thread in mi_switch with wrong state"));
		/* Put us back on the run queue (kse and all). */
		setrunqueue(td);
	} else if (td->td_flags & TDF_UNBOUND) {
		/*
		 * We will not be on the run queue. So we must be
		 * sleeping or similar. If it's available,
		 * someone else can use the KSE if they need it.
		 * XXXKSE KSE loaning will change this.
		 */
		td->td_kse = NULL;
		kse_reassign(ke);
	}

	cpu_switch();		/* SHAZAM!! */

	/*
	 * Start setting up stats etc. for the incoming thread.
	 * Similar code in fork_exit() is returned to by cpu_switch()
	 * in the case of a new thread/process.
	 */
	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)td;
	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/*
	 * Call the switchin function while still holding the scheduler lock
	 * (used by the idlezero code and the general page-zeroing code).
	 */
	if (td->td_switchin)
		td->td_switchin();
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	switch (p->p_state) {
	case PRS_ZOMBIE:
		panic("setrunnable(1)");
	default:
		break;
	}
	switch (td->td_state) {
	case 0:
	case TDS_RUNNING:
	case TDS_IWAIT:
	case TDS_SWAPPED:
	default:
		printf("state is %d\n", td->td_state);
		panic("setrunnable(2)");
	case TDS_SUSPENDED:
		thread_unsuspend(p);
		break;
	case TDS_SLP:			/* e.g. when sending signals */
		if (td->td_flags & TDF_CVWAITQ)
			cv_waitq_remove(td);
		else
			unsleep(td);
		/* FALLTHROUGH */
	case TDS_UNQUEUED:	/* being put back onto the queue */
	case TDS_NEW:		/* not yet had time to suspend */
	case TDS_RUNQ:		/* not yet had time to suspend */
		break;
	}
	if (td->td_ksegrp->kg_slptime > 1)
		updatepri(td);
	td->td_ksegrp->kg_slptime = 0;
	if ((p->p_sflag & PS_INMEM) == 0) {
		td->td_state = TDS_SWAPPED;
		if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
			p->p_sflag |= PS_SWAPINREQ;
			wakeup(&proc0);
		}
	} else {
		if (td->td_state != TDS_RUNQ)
			setrunqueue(td); /* XXXKSE */
		maybe_resched(td);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(kg)
	register struct ksegrp *kg;
{
	register unsigned int newpriority;
	struct thread *td;

	mtx_lock_spin(&sched_lock);
	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);			/* XXXKSE silly */
	}
	mtx_unlock_spin(&sched_lock);
}
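
/*
 * Worked example (a sketch, assuming the historical values
 * INVERSE_ESTCPU_WEIGHT == 8 and NICE_WEIGHT == 1): a nice 0 ksegrp
 * with kg_estcpu == 80 gets
 *	newpriority = PUSER + 80 / 8 + 1 * (0 - PRIO_MIN)
 *		    = PUSER + 10 + 20,
 * which is then clamped to [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].
 */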

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	struct thread *td;

	avg = &averunnable;
	sx_slock(&allproc_lock);
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			switch (td->td_state) {
			case TDS_RUNQ:
			case TDS_RUNNING:
				if ((p->p_flag & P_NOLOAD) != 0)
					goto nextproc;
				nrun++; /* XXXKSE */
			default:
				break;
			}
nextproc:
			continue;
		}
	}
	sx_sunlock(&allproc_lock);
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(dummy)
	void *dummy;
{

	callout_init(&schedcpu_callout, 1);
	callout_init(&roundrobin_callout, 0);
	callout_init(&loadav_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
schedclock(td)
	struct thread *td;
{
	struct kse *ke;
	struct ksegrp *kg;

	KASSERT((td != NULL), ("schedclock: null thread pointer"));
	ke = td->td_kse;
	kg = td->td_ksegrp;
	ke->ke_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}

/*
 * General purpose yield system call
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	td->td_priority = PRI_MAX_TIMESHARE;
	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;

	return (0);
}
1119