/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */

static struct callout loadav_callout;
static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes when sampling at
 * 5 second intervals: that is 12, 60 and 180 samples per interval,
 * hence decay factors of exp(-1/12), exp(-1/60) and exp(-1/180).
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

static void	endtsleep(void *);
static void	loadav(void *arg);
static void	roundrobin(void *arg);
static void	schedcpu(void *arg);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I",
	"Roundrobin scheduling quantum in microseconds");

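/*
 * Example (illustrative, not part of the original source): with the usual
 * hz of 100 the clock tick is 10000 us, so the default quantum of hz/10
 * ticks reads back through this sysctl as 100000 us; setting
 * kern.quantum=200000 would double it, and any value below one tick is
 * rejected with EINVAL by the handler above.
 */
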
/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

int
roundrobin_interval(void)
{

	return (sched_quantum);
}

/*
 * Force a switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given (b/(b+1)) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
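
/*
 * Worked example (illustrative only): with a steady load average of 1.0
 * (i.e. ldavg == FSCALE), loadfactor() yields 2*FSCALE, and decay_cpu()
 * multiplies by 2*FSCALE / (2*FSCALE + FSCALE) == 2/3 each second;
 * (2/3)**5.68 ~= .1, matching the "loadav: 1, power: 5.68" entry in the
 * table above.
 */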

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

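/*
 * Worked example (illustrative only): with FSHIFT == CCPU_SHIFT == 11 and
 * stathz == 100, each statclock tick charged in schedcpu() below adds one
 * fixed-point unit (2**-11 ~= (1 - exp(-1/20)) / 100) to ke_pctcpu, while
 * the ccpu multiply decays ke_pctcpu by exp(-1/20) once a second, so a
 * thread that runs flat out converges on ke_pctcpu ~= FSCALE, i.e. 100%.
 */
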
/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int realstathz;
	int awake;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		mtx_lock_spin(&sched_lock);
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment time in/out of memory and sleep
				 * time (if sleeping).  We ignore overflow;
				 * with 16-bit int's (remember them?)
				 * overflow takes 45 days.
				 */
				/*
				 * XXXKSE: the kse slptimes are not touched
				 * in wakeup because the thread may not HAVE
				 * a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ ||
				    ke->ke_state == KES_RUNNING) {
					ke->ke_slptime = 0;
					awake = 1;
				} else {
					ke->ke_slptime++;
				}

				/*
				 * pctcpu is only for ps?
				 * Do it per kse.. and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >> FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_slptime > 1) {
					continue;
				}

#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100) ?
				    ((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_cpticks * FSCALE / realstathz)) >>
				    FSHIFT;
#endif
				ke->ke_cpticks = 0;
			} /* end of kse loop */
			if (awake == 0) {
				kg->kg_slptime++;
			} else {
				kg->kg_slptime = 0;
			}
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				int changedqueue;

				if (td->td_priority >= PUSER) {
					/*
					 * Only change the priority
					 * of threads that are still at their
					 * user priority.
					 * XXXKSE This is problematic
					 * as we may need to re-order
					 * the threads on the KSEG list.
					 */
					changedqueue =
					    ((td->td_priority / RQ_PPQ) !=
					     (kg->kg_user_pri / RQ_PPQ));
					if (changedqueue &&
					    td->td_state == TDS_RUNQ) {
						/* this could be optimised */
						remrunqueue(td);
						td->td_priority =
						    kg->kg_user_pri;
						setrunqueue(td);
					} else {
						td->td_priority = kg->kg_user_pri;
					}
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
	wakeup(&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct thread *td)
{
	register struct ksegrp *kg;
	register unsigned int newcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (td == NULL)
		return;
	kg = td->td_ksegrp;
	newcpu = kg->kg_estcpu;
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		kg->kg_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(td->td_ksegrp);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

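/*
 * Example (illustrative only): a sleep channel at address 0x1234 hashes to
 * slot (0x1234 >> 8) & 127 == 0x12 == 18; channels whose addresses differ
 * only in their low 8 bits always share a slot, which is harmless because
 * wakeup() still compares td_wchan against the exact identifier.
 */
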
void
sleepinit(void)
{
	int i;

	sched_quantum = hz / 10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */

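/*
 * Example (hypothetical driver code, not part of this file): wait up to
 * one second for an interrupt handler to set sc->sc_done, dropping
 * sc->sc_mtx across the sleep and reacquiring it before returning:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_done == 0) {
 *		error = msleep(&sc->sc_done, &sc->sc_mtx,
 *		    PZERO | PCATCH, "excwt", hz);
 *		if (error != 0)
 *			break;
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 */
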
int
msleep(void *ident, struct mtx *mtx, int priority, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int sig, catch = priority & PCATCH;
	int rval = 0;
	WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	KASSERT((td->td_kse != NULL), ("msleep: NULL KSE?"));
	KASSERT((td->td_kse->ke_state == KES_RUNNING), ("msleep: kse state?"));
	WITNESS_SLEEP(0, &mtx->mtx_object);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	/*
	 * If we are capable of async syscalls and there isn't already
	 * another one ready to return, start a new thread
	 * and queue it as ready to run. Note that there is danger here
	 * because we need to make sure that we don't sleep allocating
	 * the thread (recursion here might be bad).
	 * Hence the TDF_INMSLEEP flag.
	 */
	if (p->p_flag & P_KSES) {
		/*
		 * Just don't bother if we are exiting
		 * and not the exiting thread.
		 */
		if ((p->p_flag & P_WEXIT) && catch && p->p_singlethread != td)
			return (EINTR);
		if (td->td_mailbox && (!(td->td_flags & TDF_INMSLEEP))) {
			/*
			 * If we have no queued work to do, then
			 * upcall to the UTS to see if it has more to do.
			 * We don't need to upcall now, just make it and
			 * queue it.
			 */
			mtx_lock_spin(&sched_lock);
			if (TAILQ_FIRST(&td->td_ksegrp->kg_runq) == NULL) {
				/* Don't recurse here! */
				KASSERT((td->td_kse->ke_state == KES_RUNNING),
				    ("msleep: kse stateX?"));
				td->td_flags |= TDF_INMSLEEP;
				thread_schedule_upcall(td, td->td_kse);
				td->td_flags &= ~TDF_INMSLEEP;
				KASSERT((td->td_kse->ke_state == KES_RUNNING),
				    ("msleep: kse stateY?"));
			}
			mtx_unlock_spin(&sched_lock);
		}
		KASSERT((td->td_kse != NULL), ("msleep: NULL KSE2?"));
		KASSERT((td->td_kse->ke_state == KES_RUNNING),
		    ("msleep: kse state2?"));
		KASSERT((td->td_kse->ke_thread == td),
		    ("msleep: kse/thread mismatch?"));
	}
	mtx_lock_spin(&sched_lock);
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		if (mtx != NULL && (priority & PDROP))
			mtx_unlock(mtx);
		mtx_unlock_spin(&sched_lock);
		return (0);
	}

	DROP_GIANT();

	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
		if (priority & PDROP)
			mtx = NULL;
	}

	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && td->td_state == TDS_RUNNING, ("msleep"));

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	td->td_kse->ke_slptime = 0;	/* XXXKSE */
	td->td_ksegrp->kg_slptime = 0;
	td->td_priority = priority & PRIMASK;
	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
	    td, p->p_pid, p->p_comm, wmesg, ident);
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
	if (timo)
		callout_reset(&td->td_slpcallout, timo, endtsleep, td);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there, and
	 * a wakeup or a SIGCONT (or both) could occur while we were stopped
	 * without resuming us, thus we must be ready for sleep
	 * when cursig is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be NULL upon return from cursig.
	 */
	if (catch) {
		CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
		    p->p_pid, p->p_comm);
		td->td_flags |= TDF_SINTR;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		sig = cursig(td);
		if (sig == 0) {
			if (thread_suspend_check(1)) {
				sig = SIGSTOP;
			}
		}
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (sig != 0) {
			if (td->td_wchan != NULL)
				unsleep(td);
		} else if (td->td_wchan == NULL)
			catch = 0;
	} else {
		sig = 0;
	}
	if (td->td_wchan != NULL) {
		p->p_stats->p_ru.ru_nvcsw++;
		td->td_state = TDS_SLP;
		mi_switch();
	}
	CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(td->td_state == TDS_RUNNING, ("running but not TDS_RUNNING"));
	td->td_flags &= ~TDF_SINTR;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL) {
		td->td_flags &= ~TDF_TIMOFAIL;
	} else if (timo && callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * This isn't supposed to be pretty.  If we are here, then
		 * the endtsleep() callout is currently executing on another
		 * CPU and is either spinning on the sched_lock or will be
		 * soon.  If we don't synchronize here, there is a chance
		 * that this process may msleep() again before the callout
		 * has a chance to run and the callout may end up waking up
		 * the wrong msleep().  Yuck.
		 */
		td->td_flags |= TDF_TIMEOUT;
		td->td_state = TDS_SLP;
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
	}
	mtx_unlock_spin(&sched_lock);

	if (rval == 0 && catch) {
		PROC_LOCK(p);
		/* XXX: shouldn't we always be calling cursig()? */
		if (sig != 0 || (sig = cursig(td))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				rval = EINTR;
			else
				rval = ERESTART;
		}
		PROC_UNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (mtx != NULL) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}

/*
 * Implement the timeout for msleep().
 *
 * If the thread hasn't been awakened (wchan non-NULL),
 * set timeout flag and undo the sleep.  If the thread
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(void *arg)
{
	register struct thread *td = arg;

	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	/*
	 * This is the other half of the synchronization with msleep()
	 * described above.  If the TDF_TIMEOUT flag is set, we lost the
	 * race and just need to put the thread back on the runqueue.
	 */
	if ((td->td_flags & TDF_TIMEOUT) != 0) {
		td->td_flags &= ~TDF_TIMEOUT;
		setrunqueue(td);
	} else if (td->td_wchan != NULL) {
		if (td->td_state == TDS_SLP)  /* XXXKSE */
			setrunnable(td);
		else
			unsleep(td);
		td->td_flags |= TDF_TIMEOUT;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread, as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 * This is about identical to cv_abort().
 * Think about merging them?
 * Also, whatever the signal code does...
 */
void
abortsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
		if (td->td_wchan != NULL) {
			if (td->td_state == TDS_SLP) {  /* XXXKSE */
				setrunnable(td);
			} else {
				/*
				 * Probably in a suspended state..
				 * um.. dunno XXXKSE
				 */
				unsleep(td);
			}
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Remove a thread from its wait queue.
 */
void
unsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	if (td->td_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		td->td_wchan = NULL;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	register struct slpquehead *qp;
	register struct thread *td;
	struct thread *ntd;
	struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		p = td->td_proc;
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_slpq);
			td->td_wchan = NULL;
			if (td->td_state == TDS_SLP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
				    td, p->p_pid, p->p_comm);
				if (td->td_ksegrp->kg_slptime > 1)
					updatepri(td);
				td->td_ksegrp->kg_slptime = 0;
				if (p->p_sflag & PS_INMEM) {
					setrunqueue(td);
					maybe_resched(td);
				} else {
					/* XXXKSE Wrong! */
					td->td_state = TDS_RUNQ;
					p->p_sflag |= PS_SWAPINREQ;
					wakeup(&proc0);
				}
				/* END INLINE EXPANSION */
			}
			goto restart;
		}
	}
	mtx_unlock_spin(&sched_lock);
}

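/*
 * Example (hypothetical, the flip side of the msleep() sketch above): the
 * interrupt side publishes its result under the same mutex and then wakes
 * every thread sleeping on the channel:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_done = 1;
 *	wakeup(&sc->sc_done);
 *	mtx_unlock(&sc->sc_mtx);
 */
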
/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	register struct slpquehead *qp;
	register struct thread *td;
	register struct proc *p;
	struct thread *ntd;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		p = td->td_proc;
		if (td->td_wchan == ident) {
			TAILQ_REMOVE(qp, td, td_slpq);
			td->td_wchan = NULL;
			if (td->td_state == TDS_SLP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				CTR3(KTR_PROC, "wakeup1: thread %p (pid %d, %s)",
				    td, p->p_pid, p->p_comm);
				if (td->td_ksegrp->kg_slptime > 1)
					updatepri(td);
				td->td_ksegrp->kg_slptime = 0;
				if (p->p_sflag & PS_INMEM) {
					setrunqueue(td);
					maybe_resched(td);
					break;
				} else {
					/* XXXKSE Wrong */
					td->td_state = TDS_RUNQ;
					p->p_sflag |= PS_SWAPINREQ;
					wakeup(&proc0);
				}
				/* END INLINE EXPANSION */
				goto restart;
			}
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * The machine independent parts of mi_switch().
 */
void
mi_switch(void)
{
	struct bintime new_switchtime;
	struct thread *td = curthread;	/* XXX */
	struct proc *p = td->td_proc;	/* XXX */
	struct kse *ke = td->td_kse;
#if 0
	register struct rlimit *rlim;
#endif
	u_int sched_nest;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	KASSERT((ke->ke_state == KES_RUNNING), ("mi_switch: kse state?"));
#ifdef INVARIANTS
	if (td->td_state != TDS_MTX &&
	    td->td_state != TDS_RUNQ &&
	    td->td_state != TDS_RUNNING)
		mtx_assert(&Giant, MA_NOTOWNED);
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	binuptime(&new_switchtime);
	bintime_add(&p->p_runtime, &new_switchtime);
	bintime_sub(&p->p_runtime, PCPU_PTR(switchtime));

#ifdef DDB
	/*
	 * Don't perform context switches from the debugger.
	 */
	if (db_active) {
		mtx_unlock_spin(&sched_lock);
		db_error("Context switches not allowed in the debugger.");
	}
#endif

#if 0
	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.
	 *
	 * XXX drop sched_lock, pickup Giant
	 */
	if (p->p_state != PRS_ZOMBIE &&
	    p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    p->p_runtime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			killproc(p, "exceeded maximum CPU limit");
			mtx_lock_spin(&sched_lock);
			PROC_UNLOCK(p);
		} else {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
			psignal(p, SIGXCPU);
			mtx_lock_spin(&sched_lock);
			PROC_UNLOCK(p);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}
#endif

	/*
	 * Pick a new current process and record its start time.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	sched_nest = sched_lock.mtx_recurse;
	td->td_lastcpu = ke->ke_oncpu;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags &= ~KEF_NEEDRESCHED;
	/*
	 * At the last moment: if this KSE is not on the run queue,
	 * it needs to be freed correctly and the thread treated accordingly.
	 */
	if ((td->td_state == TDS_RUNNING) &&
	    ((ke->ke_flags & KEF_IDLEKSE) == 0)) {
		/* Put us back on the run queue (kse and all). */
		setrunqueue(td);
	} else if ((td->td_flags & TDF_UNBOUND) &&
	    (td->td_state != TDS_RUNQ)) { /* in case of old code */
		/*
		 * We will not be on the run queue.
		 * Someone else can use the KSE if they need it.
		 */
		td->td_kse = NULL;
		kse_reassign(ke);
	}
	cpu_switch();
	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
	td->td_kse->ke_state = KES_RUNNING;
	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)td;
	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	switch (p->p_state) {
	case PRS_ZOMBIE:
		panic("setrunnable(1)");
	default:
		break;
	}
	switch (td->td_state) {
	case 0:
	case TDS_RUNNING:
	case TDS_IWAIT:
	default:
		printf("state is %d\n", td->td_state);
		panic("setrunnable(2)");
	case TDS_SUSPENDED:
		thread_unsuspend(p);
		break;
	case TDS_SLP:			/* e.g. when sending signals */
		if (td->td_flags & TDF_CVWAITQ)
			cv_waitq_remove(td);
		else
			unsleep(td);
		/* FALLTHROUGH */
	case TDS_UNQUEUED:  /* being put back onto the queue */
	case TDS_NEW:	/* not yet had time to suspend */
	case TDS_RUNQ:	/* not yet had time to suspend */
		break;
	}
	if (td->td_ksegrp->kg_slptime > 1)
		updatepri(td);
	td->td_ksegrp->kg_slptime = 0;
	if ((p->p_sflag & PS_INMEM) == 0) {
		td->td_state = TDS_RUNQ; /* XXXKSE not a good idea */
		p->p_sflag |= PS_SWAPINREQ;
		wakeup(&proc0);
	} else {
		if (td->td_state != TDS_RUNQ)
			setrunqueue(td); /* XXXKSE */
		maybe_resched(td);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;
	struct thread *td;

	mtx_lock_spin(&sched_lock);
	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);			/* XXXKSE silly */
	}
	mtx_unlock_spin(&sched_lock);
}
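
/*
 * Worked example (illustrative; assumes the historical values
 * INVERSE_ESTCPU_WEIGHT == 8 and NICE_WEIGHT == 1): a nice-0 ksegrp with
 * kg_estcpu == 40 gets kg_user_pri == PUSER + 40/8 + 1 * (0 - PRIO_MIN)
 * == PUSER + 25, which is then clipped into the
 * [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE] range.
 */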

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	struct thread *td;

	avg = &averunnable;
	sx_slock(&allproc_lock);
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			switch (td->td_state) {
			case TDS_RUNQ:
			case TDS_RUNNING:
				if ((p->p_flag & P_NOLOAD) != 0)
					goto nextproc;
				nrun++; /* XXXKSE */
				/* FALLTHROUGH */
			default:
				break;
			}
nextproc:
			continue;
		}
	}
	sx_sunlock(&allproc_lock);
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

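/*
 * Worked example (illustrative only): with nrun fixed at 2, each update
 * computes ldavg[0] = (cexp[0] * ldavg[0] + 2 * FSCALE *
 * (FSCALE - cexp[0])) >> FSHIFT, the fixed-point form of
 * ldavg = ldavg * exp(-1/12) + 2 * (1 - exp(-1/12)), so the one-minute
 * average converges geometrically on 2.00.
 */
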
/* ARGSUSED */
static void
sched_setup(void *dummy)
{

	callout_init(&schedcpu_callout, 1);
	callout_init(&roundrobin_callout, 0);
	callout_init(&loadav_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
schedclock(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;

	KASSERT((td != NULL), ("schedclock: null thread pointer"));
	ke = td->td_kse;
	kg = td->td_ksegrp;
	ke->ke_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}

/*
 * General purpose yield system call.
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	td->td_priority = PRI_MAX_TIMESHARE;
	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;

	return (0);
}