/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#ifdef XEN
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int	hogticks;
static int pause_wchan;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	loadav(void *arg);

void
sleepinit(void)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, int timo)
{
	struct thread *td;
	struct proc *p;
	struct lock_class *class;
	int catch, flags, lock_state, pri, rval;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		if (lock != NULL && priority & PDROP)
			class->lc_unlock(lock);
		return (0);
	}
	catch = priority & PCATCH;
	pri = priority & PRIMASK;
	rval = 0;

	/*
	 * If we are already on a sleep queue, then remove us from that
	 * sleep queue first.  We have to do this to handle recursive
	 * sleeps.
	 */
	if (TD_ON_SLEEPQ(td))
		sleepq_remove(td, td->td_wchan);

	if (ident == &pause_wchan)
		flags = SLEEPQ_PAUSE;
	else
		flags = SLEEPQ_SLEEP;
	if (catch)
		flags |= SLEEPQ_INTERRUPTIBLE;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	if (lock == &Giant.lock_object)
		mtx_assert(&Giant, MA_OWNED);
	DROP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object &&
	    !(class->lc_flags & LC_SLEEPABLE)) {
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
	} else
		/* GCC needs to follow the Yellow Brick Road */
		lock_state = -1;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, flags, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
	if (timo && catch)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (timo)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
		class->lc_lock(lock, lock_state);
		WITNESS_RESTORE(lock, lock_witness);
	}
	return (rval);
}
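
/*
 * Example (an illustrative sketch, not part of the original file):
 * _sleep() is normally reached through wrappers such as msleep(9) and
 * tsleep(9).  A typical consumer blocks on a condition under a mutex;
 * the "sc" softc and its members below are hypothetical:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_ready == 0) {
 *		error = msleep(&sc->sc_ready, &sc->sc_mtx, PCATCH,
 *		    "scrdy", 0);
 *		if (error != 0)
 *			break;		(EINTR or ERESTART from a signal)
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 *
 * The condition is re-checked in a loop because a wakeup() does not
 * guarantee that it still holds by the time the sleeper runs again.
 */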

int
msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
{
	struct thread *td;
	struct proc *p;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		return (0);
	}

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->lock_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);

	/*
	 * We can't call ktrace with any spin locks held, since ktrace may
	 * need to acquire the ktrace_mtx lock, and WITNESS_WARN considers
	 * it an error to hold any spin lock.  Thus, we have to drop the
	 * sleepq spin lock while we handle those requests.  This is safe
	 * since we have already placed our thread on the sleep queue.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (timo)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->lock_object, mtx);
	return (rval);
}
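
/*
 * Example (an illustrative sketch, not part of the original file):
 * msleep_spin() serves code that must sleep while synchronized by a
 * spin mutex; the names below are hypothetical:
 *
 *	mtx_lock_spin(&sc->sc_spin_mtx);
 *	while (sc->sc_done == 0)
 *		(void)msleep_spin(&sc->sc_done, &sc->sc_spin_mtx,
 *		    "scdone", hz);
 *	mtx_unlock_spin(&sc->sc_spin_mtx);
 *
 * Note that msleep_spin() takes no priority argument and cannot be
 * interrupted by a signal; a timeout is the only way to bound the wait.
 */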

/*
 * pause() is like tsleep() except that the thread does not expect to
 * be explicitly woken up by another thread; it simply wishes to sleep
 * until the timeout expires.  It is implemented using a dummy wait
 * channel.
 */
int
pause(const char *wmesg, int timo)
{

	KASSERT(timo != 0, ("pause: timeout required"));
	return (tsleep(&pause_wchan, 0, wmesg, timo));
}
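
/*
 * Example (an illustrative sketch, not part of the original file):
 * sleeping for roughly 100ms with no wait channel of interest:
 *
 *	pause("zzz", hz / 10);
 *
 * Since nothing is expected to call wakeup() on &pause_wchan, the call
 * normally returns EWOULDBLOCK once the timeout expires.
 */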

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}
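
/*
 * Example (an illustrative sketch, not part of the original file): the
 * producer side matching the msleep() consumer shown after _sleep()
 * above; the names are hypothetical:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_ready = 1;
 *	wakeup(&sc->sc_ready);
 *	mtx_unlock(&sc->sc_mtx);
 *
 * wakeup_one() may be used instead when at most one waiter can make
 * progress, avoiding a thundering herd of sleepers.
 */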

static void
kdb_switch(void)
{
	thread_unlock(curthread);
	kdb_backtrace();
	kdb_reenter();
	panic("%s: did not reenter debugger", __func__);
}

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct proc *p;

	td = curthread;			/* XXX */
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	p = td->td_proc;		/* XXX */
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
	    (td->td_owepreempt) && (flags & SW_INVOL) != 0 &&
	    newtd == NULL) || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active)
		kdb_switch();
	if (flags & SW_VOL)
		td->td_ru.ru_nvcsw++;
	else
		td->td_ru.ru_nivcsw++;
#ifdef SCHED_STATS
	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	td->td_generation++;	/* bump preempt-detect counter */
	PCPU_INC(cnt.v_swtch);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
	if (TD_IS_IDLETHREAD(td))
		CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
		    td, td->td_name, td->td_priority);
	else if (newtd != NULL)
		CTR5(KTR_SCHED,
		    "mi_switch: %p(%s) prio %d preempted by %p(%s)",
		    td, td->td_name, td->td_priority, newtd,
		    newtd->td_name);
	else
		CTR6(KTR_SCHED,
		    "mi_switch: %p(%s) prio %d inhibit %d wmesg %s lock %s",
		    td, td->td_name, td->td_priority,
		    td->td_inhibitors, td->td_wmesg, td->td_lockname);
#endif
#ifdef XEN
	PT_UPDATES_FLUSH();
#endif
	sched_switch(td, newtd, flags);
	CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d",
	    td, td->td_name, td->td_priority);

	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change thread state to be runnable, placing it on the run queue if
 * it is in memory.  If it is swapped out, return true so our caller
 * will know to awaken the swapper.
 */
int
setrunnable(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return (0);
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out,
		 * then arrange to swap in this process.  Otherwise just
		 * return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return (0);
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((td->td_flags & TDF_INMEM) == 0) {
		if ((td->td_flags & TDF_SWAPINREQ) == 0) {
			td->td_flags |= TDF_SWAPINREQ;
			return (1);
		}
	} else
		sched_wakeup(td);
	return (0);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
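
/*
 * Worked example (not part of the original file): with the default
 * FSHIFT of 11, FSCALE is 2048 and cexp[0] is about 1884
 * (0.9200 * 2048).  For the 1-minute average, each 5-second sample
 * computes
 *
 *	ldavg[0] = (1884 * ldavg[0] + nrun * 2048 * (2048 - 1884)) >> 11
 *
 * which is the fixed-point form of
 *
 *	load = load * exp(-5/60) + nrun * (1 - exp(-5/60)),
 *
 * i.e. an exponentially weighted moving average with a one-minute
 * time constant.
 */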

/* ARGSUSED */
static void
synch_setup(void *dummy)
{
	callout_init(&loadav_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
}

/*
 * General purpose yield system call.
 */
int
yield(struct thread *td, struct yield_args *uap)
{

	thread_lock(td);
	sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	td->td_retval[0] = 0;
	return (0);
}