/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;

static struct callout loadav_callout;
static struct callout lbolt_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
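/*
 * The constants above implement, in fixed point, the usual exponential
 * decay filter applied by loadav() below.  Sampling every 5 seconds, one
 * update for a window of T seconds (T = 60, 300 or 900) is roughly
 *
 *	load = load * exp(-5/T) + nrun * (1 - exp(-5/T))
 *
 * hence exp(-5/60) = exp(-1/12) and so on; with nrun held constant the
 * average converges toward nrun.  A minimal floating-point sketch of one
 * update step (illustrative only, not compiled into the kernel; the name
 * loadav_step is made up):
 *
 *	static double
 *	loadav_step(double load, int nrun, double tau)
 *	{
 *		double decay = exp(-5.0 / tau);
 *
 *		return (load * decay + nrun * (1.0 - decay));
 *	}
 */
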
/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	endtsleep(void *);
static void	loadav(void *arg);
static void	lboltcb(void *arg);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
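/*
 * For example (illustrative address only), a wait channel at 0xc241a704
 * hashes to LOOKUP(0xc241a704) = (0xc241a7 & 0x7f) = 0x27 = 39, so all
 * channels within that 256-byte region end up on slpque[39].
 */
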
void
sleepinit(void)
{
	int i;

	hogticks = (hz / 10) * 2;	/* Default only. */
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */

int
msleep(ident, mtx, priority, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	int priority, timo;
	const char *wmesg;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int sig, catch = priority & PCATCH;
	int rval = 0;
	WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_SLEEP(0, &mtx->mtx_object);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	/*
	 * If we are capable of async syscalls and there isn't already
	 * another one ready to return, start a new thread
	 * and queue it as ready to run.  Note that there is danger here
	 * because we need to make sure that we don't sleep allocating
	 * the thread (recursion here might be bad).
	 * Hence the TDF_INMSLEEP flag.
	 */
	if (p->p_flag & P_THREADED) {
		/*
		 * Just don't bother if we are exiting
		 * and not the exiting thread or thread was marked as
		 * interrupted.
		 */
		if (catch &&
		    (((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) ||
		    (td->td_flags & TDF_INTERRUPT))) {
			td->td_flags &= ~TDF_INTERRUPT;
			return (EINTR);
		}
	}
	mtx_lock_spin(&sched_lock);
	if (cold) {
		/*
		 * During autoconfiguration, just give interrupts
		 * a chance, then just return.
		 * Don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		if (mtx != NULL && priority & PDROP)
			mtx_unlock(mtx);
		mtx_unlock_spin(&sched_lock);
		return (0);
	}

	DROP_GIANT();

	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
		if (priority & PDROP)
			mtx = NULL;
	}

	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
	    td, p->p_pid, p->p_comm, wmesg, ident);

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
	TD_SET_ON_SLEEPQ(td);
	if (timo)
		callout_reset(&td->td_slpcallout, timo, endtsleep, td);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there, and
	 * a wakeup or a SIGCONT (or both) could occur while we were stopped
	 * without resuming us, thus we must be ready for sleep
	 * when cursig is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from cursig.
	 */
	if (catch) {
		CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
		    p->p_pid, p->p_comm);
		td->td_flags |= TDF_SINTR;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		sig = cursig(td);
		if (sig == 0 && thread_suspend_check(1))
			sig = SIGSTOP;
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (sig != 0) {
			if (TD_ON_SLEEPQ(td))
				unsleep(td);
		} else if (!TD_ON_SLEEPQ(td))
			catch = 0;
	} else
		sig = 0;

	/*
	 * Let the scheduler know we're about to voluntarily go to sleep.
	 */
	sched_sleep(td, priority & PRIMASK);

	if (TD_ON_SLEEPQ(td)) {
		p->p_stats->p_ru.ru_nvcsw++;
		TD_SET_SLEEPING(td);
		mi_switch();
	}
	/*
	 * We're awake from voluntary sleep.
	 */
	CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	td->td_flags &= ~TDF_SINTR;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL) {
		td->td_flags &= ~TDF_TIMOFAIL;
	} else if (timo && callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * This isn't supposed to be pretty.  If we are here, then
		 * the endtsleep() callout is currently executing on another
		 * CPU and is either spinning on the sched_lock or will be
		 * soon.  If we don't synchronize here, there is a chance
		 * that this process may msleep() again before the callout
		 * has a chance to run and the callout may end up waking up
		 * the wrong msleep().  Yuck.
		 */
		TD_SET_SLEEPING(td);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		td->td_flags &= ~TDF_TIMOFAIL;
	}
	if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
	    (rval == 0)) {
		td->td_flags &= ~TDF_INTERRUPT;
		rval = EINTR;
	}
	mtx_unlock_spin(&sched_lock);

	if (rval == 0 && catch) {
		PROC_LOCK(p);
		/* XXX: shouldn't we always be calling cursig()? */
		if (sig != 0 || (sig = cursig(td))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				rval = EINTR;
			else
				rval = ERESTART;
		}
		PROC_UNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (mtx != NULL) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}
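
/*
 * Typical usage of msleep()/wakeup(), sketched here for illustration only
 * (the softc structure with its sc_mtx and sc_flag fields is hypothetical
 * and not part of this file):
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_flag == 0) {
 *		error = msleep(&sc->sc_flag, &sc->sc_mtx, PZERO | PCATCH,
 *		    "flgwait", 5 * hz);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			break;
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 *
 * The producer sets sc_flag while holding the same mutex and then calls
 * wakeup(&sc->sc_flag).  The condition is rechecked in a loop because a
 * wakeup only indicates that the state may have changed; EWOULDBLOCK
 * (timeout) retries, while EINTR/ERESTART (signal with PCATCH) gives up.
 */
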
/*
 * Implement timeout for msleep().
 *
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct thread *td = arg;

	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	/*
	 * This is the other half of the synchronization with msleep()
	 * described above.  If the TDS_TIMEOUT flag is set, we lost the
	 * race and just need to put the process back on the runqueue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
		td->td_flags |= TDF_TIMEOUT;
		td->td_wmesg = NULL;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	TD_CLR_SLEEPING(td);
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread, as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 * This is about identical to cv_abort().
 * Think about merging them?
 * Also, whatever the signal code does...
 */
void
abortsleep(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
		if (TD_ON_SLEEPQ(td)) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
		}
	}
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
		td->td_wmesg = NULL;
	}
	mtx_unlock_spin(&sched_lock);
}
/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	struct thread *ntd;
	struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			goto restart;
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	register struct proc *p;
	struct thread *ntd;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC, "wakeup1: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			break;
		}
	}
	mtx_unlock_spin(&sched_lock);
}
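
/*
 * Choosing between the two (illustrative guidance only): wakeup() makes
 * every thread sleeping on the identifier runnable, which is safe but can
 * wake many threads when only one will find work; wakeup_one() wakes a
 * single sleeper, so it is only appropriate when every sleeper rechecks
 * its condition under the associated mutex, e.g.
 *
 *	mtx_lock(&q_mtx);
 *	TAILQ_INSERT_TAIL(&q, item, link);
 *	wakeup_one(&q);
 *	mtx_unlock(&q_mtx);
 *
 * where q, q_mtx and item are hypothetical names used only for this sketch.
 */
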
/*
 * The machine independent parts of mi_switch().
 */
void
mi_switch(void)
{
	struct bintime new_switchtime;
	struct thread *td = curthread;	/* XXX */
	struct proc *p = td->td_proc;	/* XXX */
	u_int sched_nest;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);

	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) &&
	    !TD_ON_RUNQ(td) &&
	    !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1,
	    ("mi_switch: switch in a critical section"));

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	binuptime(&new_switchtime);
	bintime_add(&p->p_runtime, &new_switchtime);
	bintime_sub(&p->p_runtime, PCPU_PTR(switchtime));

#ifdef DDB
	/*
	 * Don't perform context switches from the debugger.
	 */
	if (db_active) {
		mtx_unlock_spin(&sched_lock);
		db_print_backtrace();
		db_error("Context switches not allowed in the debugger.");
	}
#endif

	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * over max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit != RLIM_INFINITY &&
	    p->p_runtime.sec > p->p_cpulimit) {
		p->p_sflag |= PS_XCPU;
		td->td_flags |= TDF_ASTPENDING;
	}

	/*
	 * Finish up stats for outgoing thread.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);

	sched_nest = sched_lock.mtx_recurse;
	sched_switchout(td);

	cpu_switch();		/* SHAZAM!! */

	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)td;
	sched_switchin(td);

	/*
	 * Start setting up stats etc. for the incoming thread.
	 * Similar code in fork_exit() is returned to by cpu_switch()
	 * in the case of a new thread/process.
	 */
	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/*
	 * Call the switchin function while still holding the scheduler lock
	 * (used by the idlezero code and the general page-zeroing code).
	 */
	if (td->td_switchin)
		td->td_switchin();

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	switch (p->p_state) {
	case PRS_ZOMBIE:
		panic("setrunnable(1)");
	default:
		break;
	}
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return;
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process.  Otherwise just
		 * return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return;
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((p->p_sflag & PS_INMEM) == 0) {
		if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
			p->p_sflag |= PS_SWAPINREQ;
			wakeup(&proc0);
		}
	} else
		sched_wakeup(td);
}
/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	struct thread *td;

	avg = &averunnable;
	sx_slock(&allproc_lock);
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			switch (td->td_state) {
			case TDS_RUNQ:
			case TDS_RUNNING:
				if ((p->p_flag & P_NOLOAD) != 0)
					goto nextproc;
				nrun++; /* XXXKSE */
			default:
				break;
			}
nextproc:
			continue;
		}
	}
	sx_sunlock(&allproc_lock);
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

static void
lboltcb(void *arg)
{
	wakeup(&lbolt);
	callout_reset(&lbolt_callout, hz, lboltcb, NULL);
}

/* ARGSUSED */
static void
sched_setup(dummy)
	void *dummy;
{
	callout_init(&loadav_callout, 0);
	callout_init(&lbolt_callout, 1);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
	lboltcb(NULL);
}

/*
 * General purpose yield system call.
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
	sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;

	return (0);
}