/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup __P((void *dummy));
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */

static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

static void	endtsleep __P((void *));
static void	roundrobin __P((void *arg));
static void	schedcpu __P((void *arg));

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;

        new_val = sched_quantum * tick;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < tick)
                return (EINVAL);
        sched_quantum = new_val / tick;
        hogticks = 2 * sched_quantum;
        return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
        0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
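
/*
 * Editorial note (not part of the original file): kern.quantum is
 * exported in microseconds while sched_quantum is kept in ticks, so the
 * handler above converts with `tick' (microseconds per clock tick).  A
 * worked example, assuming hz = 100 (tick = 10000): writing
 * kern.quantum = 20000 stores sched_quantum = 20000 / 10000 = 2 ticks
 * and hogticks = 4 ticks; any value below one tick is rejected with
 * EINVAL.
 */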

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
void
maybe_resched(p)
        struct proc *p;
{

        mtx_assert(&sched_lock, MA_OWNED);
        if (p->p_pri.pri_level < curproc->p_pri.pri_level)
                need_resched(curproc);
}

int
roundrobin_interval(void)
{
        return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(arg)
        void *arg;
{

#ifdef SMP
        mtx_lock_spin(&sched_lock);
        forward_roundrobin();
        mtx_unlock_spin(&sched_lock);
#endif

        callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
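
/*
 * Editorial note (not part of the original file): a worked instance of
 * the derivation above.  With a load average of 1.0 (ldavg[0] == FSCALE),
 * loadfactor() gives loadfac = 2 * FSCALE, so
 *
 *	decay_cpu(loadfac, cpu) = cpu * (2 * FSCALE) / (3 * FSCALE)
 *
 * i.e. p_estcpu is scaled by 2/3 once a second.  Five iterations leave
 * (2/3)**5 ~= 0.13 of the original value, consistent with the table
 * above, which shows that the exact power needed to reach 0.1 at
 * loadav 1 is 5.68.
 */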

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
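
/*
 * Editorial note (not part of the original file): checking the "95% in
 * 60 seconds" claim.  Each schedcpu() pass scales p_pctcpu by
 * ccpu/FSCALE ~= exp(-1/20), so sixty one-second passes scale it by
 * exp(-60/20) = exp(-3) ~= 0.0498, i.e. roughly 95% of the old usage
 * has been forgotten.  With the usual FSHIFT of 11 (FSCALE = 2048) the
 * stored constant is 0.95122942... * 2048 ~= 1948.
 */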

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(arg)
        void *arg;
{
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        register struct proc *p;
        register int realstathz;

        realstathz = stathz ? stathz : hz;
        sx_slock(&allproc_lock);
        LIST_FOREACH(p, &allproc, p_list) {
                /*
                 * Increment time in/out of memory and sleep time
                 * (if sleeping).  We ignore overflow; with 16-bit int's
                 * (remember them?) overflow takes 45 days.
                 */
                mtx_lock_spin(&sched_lock);
                p->p_swtime++;
                if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
                        p->p_slptime++;
                p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
                /*
                 * If the process has slept the entire second,
                 * stop recalculating its priority until it wakes up.
                 */
                if (p->p_slptime > 1) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }

                /*
                 * p_pctcpu is only for ps.
                 */
#if	(FSHIFT >= CCPU_SHIFT)
                p->p_pctcpu += (realstathz == 100)?
                        ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
                        100 * (((fixpt_t) p->p_cpticks)
                                << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
                p->p_pctcpu += ((FSCALE - ccpu) *
                        (p->p_cpticks * FSCALE / realstathz)) >> FSHIFT;
#endif
                p->p_cpticks = 0;
                p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
                resetpriority(p);
                if (p->p_pri.pri_level >= PUSER) {
                        if (p->p_oncpu == NOCPU &&	/* idle */
                            p->p_stat == SRUN &&
                            (p->p_sflag & PS_INMEM) &&
                            (p->p_pri.pri_level / RQ_PPQ) !=
                            (p->p_pri.pri_user / RQ_PPQ)) {
                                remrunqueue(p);
                                p->p_pri.pri_level = p->p_pri.pri_user;
                                setrunqueue(p);
                        } else
                                p->p_pri.pri_level = p->p_pri.pri_user;
                }
                mtx_unlock_spin(&sched_lock);
        }
        sx_sunlock(&allproc_lock);
        vmmeter();
        wakeup((caddr_t)&lbolt);
        callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(p)
        register struct proc *p;
{
        register unsigned int newcpu = p->p_estcpu;
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

        if (p->p_slptime > 5 * loadfac)
                p->p_estcpu = 0;
        else {
                p->p_slptime--;	/* the first time was done in schedcpu */
                while (newcpu && --p->p_slptime)
                        newcpu = decay_cpu(loadfac, newcpu);
                p->p_estcpu = newcpu;
        }
        resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define	TABLESIZE	128
static TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
#define	LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))

void
sleepinit(void)
{
        int i;

        sched_quantum = hz/10;
        hogticks = 2 * sched_quantum;
        for (i = 0; i < TABLESIZE; i++)
                TAILQ_INIT(&slpque[i]);
}
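
/*
 * Editorial note (not part of the original file): a worked instance of
 * the hash above, for a hypothetical wait channel at 0xc0a1b2c4:
 *
 *	LOOKUP(0xc0a1b2c4) = (0xc0a1b2c4 >> 8) & 127
 *			   = 0xc0a1b2 & 0x7f = 0x32 = 50
 *
 * so sleepers on that channel hang off slpque[50].  Because most
 * identifiers are addresses of aligned kernel objects, the discarded
 * low byte carries little entropy anyway.
 */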

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */
int
msleep(ident, mtx, priority, wmesg, timo)
        void *ident;
        struct mtx *mtx;
        int priority, timo;
        const char *wmesg;
{
        struct proc *p = curproc;
        int sig, catch = priority & PCATCH;
        int rval = 0;
        WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
        if (p && KTRPOINT(p, KTR_CSW))
                ktrcsw(p->p_tracep, 1, 0);
#endif
        WITNESS_SLEEP(0, &mtx->mtx_object);
        KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
            ("sleeping without a mutex"));
        mtx_lock_spin(&sched_lock);
        if (cold || panicstr) {
                /*
                 * After a panic, or during autoconfiguration,
                 * just give interrupts a chance, then just return;
                 * don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                if (mtx != NULL && priority & PDROP)
                        mtx_unlock_flags(mtx, MTX_NOSWITCH);
                mtx_unlock_spin(&sched_lock);
                return (0);
        }

        DROP_GIANT_NOSWITCH();

        if (mtx != NULL) {
                mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
                WITNESS_SAVE(&mtx->mtx_object, mtx);
                mtx_unlock_flags(mtx, MTX_NOSWITCH);
                if (priority & PDROP)
                        mtx = NULL;
        }

        KASSERT(p != NULL, ("msleep1"));
        KASSERT(ident != NULL && p->p_stat == SRUN, ("msleep"));
        /*
         * Process may be sitting on a slpque if asleep() was called, remove
         * it before re-adding.
         */
        if (p->p_wchan != NULL)
                unsleep(p);

        p->p_wchan = ident;
        p->p_wmesg = wmesg;
        p->p_slptime = 0;
        p->p_pri.pri_level = priority & PRIMASK;
        CTR5(KTR_PROC, "msleep: proc %p (pid %d, %s) on %s (%p)", p, p->p_pid,
            p->p_comm, wmesg, ident);
        TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
        if (timo)
                callout_reset(&p->p_slpcallout, timo, endtsleep, p);
        /*
         * We put ourselves on the sleep queue and start our timeout
         * before calling CURSIG, as we could stop there, and a wakeup
         * or a SIGCONT (or both) could occur while we were stopped.
         * A SIGCONT would cause us to be marked as SSLEEP
         * without resuming us, thus we must be ready for sleep
         * when CURSIG is called.  If the wakeup happens while we're
         * stopped, p->p_wchan will be 0 upon return from CURSIG.
         */
        if (catch) {
                CTR3(KTR_PROC, "msleep caught: proc %p (pid %d, %s)", p,
                    p->p_pid, p->p_comm);
                p->p_sflag |= PS_SINTR;
                mtx_unlock_spin(&sched_lock);
                PROC_LOCK(p);
                sig = CURSIG(p);
                mtx_lock_spin(&sched_lock);
                PROC_UNLOCK_NOSWITCH(p);
                if (sig != 0) {
                        if (p->p_wchan)
                                unsleep(p);
                } else if (p->p_wchan == NULL)
                        catch = 0;
        } else
                sig = 0;
        if (p->p_wchan != NULL) {
                p->p_stat = SSLEEP;
                p->p_stats->p_ru.ru_nvcsw++;
                mi_switch();
        }
        CTR3(KTR_PROC, "msleep resume: proc %p (pid %d, %s)", p, p->p_pid,
            p->p_comm);
        KASSERT(p->p_stat == SRUN, ("running but not SRUN"));
        p->p_sflag &= ~PS_SINTR;
        if (p->p_sflag & PS_TIMEOUT) {
                p->p_sflag &= ~PS_TIMEOUT;
                if (sig == 0)
                        rval = EWOULDBLOCK;
        } else if (timo)
                callout_stop(&p->p_slpcallout);
        mtx_unlock_spin(&sched_lock);

        if (rval == 0 && catch) {
                PROC_LOCK(p);
                /* XXX: shouldn't we always be calling CURSIG()? */
                if (sig != 0 || (sig = CURSIG(p))) {
                        if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                                rval = EINTR;
                        else
                                rval = ERESTART;
                }
                PROC_UNLOCK(p);
        }
        PICKUP_GIANT();
#ifdef KTRACE
        mtx_lock(&Giant);
        if (KTRPOINT(p, KTR_CSW))
                ktrcsw(p->p_tracep, 0, 0);
        mtx_unlock(&Giant);
#endif
        if (mtx != NULL) {
                mtx_lock(mtx);
                WITNESS_RESTORE(&mtx->mtx_object, mtx);
        }
        return (rval);
}
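
/*
 * Editorial note (not part of the original file): a minimal sketch of
 * the usual msleep() pattern.  The foo_* names are hypothetical; the
 * while loop matters because a wakeup does not guarantee the awaited
 * predicate still holds once the sleeper reacquires the mutex.
 */
#if 0
static struct mtx foo_mtx;
static int foo_ready;

static int
example_foo_wait(void)
{
        int error = 0;

        mtx_lock(&foo_mtx);
        while (!foo_ready) {
                /* Sleep until the producer calls wakeup(&foo_ready). */
                error = msleep(&foo_ready, &foo_mtx, PZERO | PCATCH,
                    "foordy", 0);
                if (error)	/* EINTR, ERESTART or EWOULDBLOCK */
                        break;
        }
        mtx_unlock(&foo_mtx);
        return (error);
}
#endif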

/*
 * asleep() - async sleep call.  Place process on wait queue and return
 * immediately without blocking.  The process stays runnable until mawait()
 * is called.  If ident is NULL, remove process from wait queue if it is still
 * on one.
 *
 * Only the most recent sleep condition is effective when making successive
 * calls to asleep() or when calling msleep().
 *
 * The timeout, if any, is not initiated until mawait() is called.  The sleep
 * priority, signal, and timeout are specified in the asleep() call but may be
 * overridden in the mawait() call.
 *
 * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
 */

int
asleep(void *ident, int priority, const char *wmesg, int timo)
{
        struct proc *p = curproc;

        /*
         * Remove preexisting wait condition (if any) and place process
         * on appropriate slpque, but do not put process to sleep.
         */

        mtx_lock_spin(&sched_lock);

        if (p->p_wchan != NULL)
                unsleep(p);

        if (ident) {
                p->p_wchan = ident;
                p->p_wmesg = wmesg;
                p->p_slptime = 0;
                p->p_asleep.as_priority = priority;
                p->p_asleep.as_timo = timo;
                TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq);
        }

        mtx_unlock_spin(&sched_lock);

        return (0);
}
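
/*
 * Editorial note (not part of the original file): how the experimental
 * asleep()/mawait() pair described above and below is meant to be used.
 * The foo_* names are hypothetical.
 */
#if 0
static int foo_event;

static int
example_foo_async_wait(void)
{
        asleep(&foo_event, PZERO, "fooev", hz);	/* queue, do not block */
        foo_start_io();			/* a wakeup here is not lost */
        return (mawait(NULL, -1, -1));	/* block; -1, -1 keep the
                                           asleep() priority and timeout */
}
#endif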
/*
 * mawait() - wait for async condition to occur.  The process blocks until
 * wakeup() is called on the most recent asleep() address.  If wakeup is called
 * prior to mawait(), mawait() winds up being a NOP.
 *
 * If mawait() is called more than once (without an intervening asleep() call),
 * mawait() is still effectively a NOP but it calls mi_switch() to give other
 * processes some cpu before returning.  The process is left runnable.
 *
 * <<<<<<<< EXPERIMENTAL, UNTESTED >>>>>>>>>>
 */

int
mawait(struct mtx *mtx, int priority, int timo)
{
        struct proc *p = curproc;
        int rval = 0;
        WITNESS_SAVE_DECL(mtx);

        WITNESS_SLEEP(0, &mtx->mtx_object);
        KASSERT(timo > 0 || mtx_owned(&Giant) || mtx != NULL,
            ("sleeping without a mutex"));
        mtx_lock_spin(&sched_lock);
        DROP_GIANT_NOSWITCH();
        if (mtx != NULL) {
                mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
                WITNESS_SAVE(&mtx->mtx_object, mtx);
                mtx_unlock_flags(mtx, MTX_NOSWITCH);
                if (priority & PDROP)
                        mtx = NULL;
        }

        if (p->p_wchan != NULL) {
                int sig;
                int catch;

#ifdef KTRACE
                if (p && KTRPOINT(p, KTR_CSW))
                        ktrcsw(p->p_tracep, 1, 0);
#endif
                /*
                 * The call to mawait() can override defaults specified in
                 * the original asleep().
                 */
                if (priority < 0)
                        priority = p->p_asleep.as_priority;
                if (timo < 0)
                        timo = p->p_asleep.as_timo;

                /*
                 * Install timeout
                 */

                if (timo)
                        callout_reset(&p->p_slpcallout, timo, endtsleep, p);

                sig = 0;
                catch = priority & PCATCH;

                if (catch) {
                        p->p_sflag |= PS_SINTR;
                        mtx_unlock_spin(&sched_lock);
                        PROC_LOCK(p);
                        sig = CURSIG(p);
                        mtx_lock_spin(&sched_lock);
                        PROC_UNLOCK_NOSWITCH(p);
                        if (sig != 0) {
                                if (p->p_wchan)
                                        unsleep(p);
                        } else if (p->p_wchan == NULL)
                                catch = 0;
                }
                if (p->p_wchan != NULL) {
                        p->p_stat = SSLEEP;
                        p->p_stats->p_ru.ru_nvcsw++;
                        mi_switch();
                }
                KASSERT(p->p_stat == SRUN, ("running but not SRUN"));
                p->p_sflag &= ~PS_SINTR;
                if (p->p_sflag & PS_TIMEOUT) {
                        p->p_sflag &= ~PS_TIMEOUT;
                        if (sig == 0)
                                rval = EWOULDBLOCK;
                } else if (timo)
                        callout_stop(&p->p_slpcallout);
                mtx_unlock_spin(&sched_lock);
                if (rval == 0 && catch) {
                        PROC_LOCK(p);
                        if (sig != 0 || (sig = CURSIG(p))) {
                                if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                                        rval = EINTR;
                                else
                                        rval = ERESTART;
                        }
                        PROC_UNLOCK(p);
                }
#ifdef KTRACE
                mtx_lock(&Giant);
                if (KTRPOINT(p, KTR_CSW))
                        ktrcsw(p->p_tracep, 0, 0);
                mtx_unlock(&Giant);
#endif
        } else {
                /*
                 * If as_priority is 0, mawait() has been called without an
                 * intervening asleep().  We are still effectively a NOP,
                 * but we call mi_switch() for safety.
                 */

                if (p->p_asleep.as_priority == 0) {
                        p->p_stats->p_ru.ru_nvcsw++;
                        mi_switch();
                }
                mtx_unlock_spin(&sched_lock);
        }

        /*
         * Clear p_asleep.as_priority as an indication that mawait() has been
         * called.  If mawait() is called again without an intervening
         * asleep(), mawait() is still effectively a NOP but the above
         * mi_switch() code is triggered as a safety.
         */
        if (rval == 0)
                p->p_asleep.as_priority = 0;

        PICKUP_GIANT();
        if (mtx != NULL) {
                mtx_lock(mtx);
                WITNESS_RESTORE(&mtx->mtx_object, mtx);
        }
        return (rval);
}
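
/*
 * Editorial note (not part of the original file): timeouts are given in
 * ticks, so callers conventionally scale from real time with hz.  A
 * sketch, with hypothetical foo_* names, of waiting at most half a
 * second; EWOULDBLOCK means endtsleep() below fired first.
 */
#if 0
static int
example_foo_timed_wait(void)
{
        int error;

        /* foo_mtx is assumed to be held by the caller. */
        error = msleep(&foo_done, &foo_mtx, PZERO, "foowt", hz / 2);
        if (error == EWOULDBLOCK) {
                /* Timed out: no wakeup(&foo_done) within ~500 ms. */
                foo_handle_timeout();
        }
        return (error);
}
#endif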
/*
 * Implement timeout for msleep() or asleep()/mawait().
 *
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(arg)
        void *arg;
{
        register struct proc *p;

        p = (struct proc *)arg;
        CTR3(KTR_PROC, "endtsleep: proc %p (pid %d, %s)", p, p->p_pid,
            p->p_comm);
        mtx_lock_spin(&sched_lock);
        if (p->p_wchan) {
                if (p->p_stat == SSLEEP)
                        setrunnable(p);
                else
                        unsleep(p);
                p->p_sflag |= PS_TIMEOUT;
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
        register struct proc *p;
{

        mtx_lock_spin(&sched_lock);
        if (p->p_wchan) {
                TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_slpq);
                p->p_wchan = NULL;
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
        register void *ident;
{
        register struct slpquehead *qp;
        register struct proc *p;

        mtx_lock_spin(&sched_lock);
        qp = &slpque[LOOKUP(ident)];
restart:
        TAILQ_FOREACH(p, qp, p_slpq) {
                if (p->p_wchan == ident) {
                        TAILQ_REMOVE(qp, p, p_slpq);
                        p->p_wchan = NULL;
                        if (p->p_stat == SSLEEP) {
                                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                                CTR3(KTR_PROC, "wakeup: proc %p (pid %d, %s)",
                                    p, p->p_pid, p->p_comm);
                                if (p->p_slptime > 1)
                                        updatepri(p);
                                p->p_slptime = 0;
                                p->p_stat = SRUN;
                                if (p->p_sflag & PS_INMEM) {
                                        setrunqueue(p);
                                        maybe_resched(p);
                                } else {
                                        p->p_sflag |= PS_SWAPINREQ;
                                        wakeup((caddr_t)&proc0);
                                }
                                /* END INLINE EXPANSION */
                                goto restart;
                        }
                }
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
        register void *ident;
{
        register struct slpquehead *qp;
        register struct proc *p;

        mtx_lock_spin(&sched_lock);
        qp = &slpque[LOOKUP(ident)];

        TAILQ_FOREACH(p, qp, p_slpq) {
                if (p->p_wchan == ident) {
                        TAILQ_REMOVE(qp, p, p_slpq);
                        p->p_wchan = NULL;
                        if (p->p_stat == SSLEEP) {
                                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                                CTR3(KTR_PROC, "wakeup1: proc %p (pid %d, %s)",
                                    p, p->p_pid, p->p_comm);
                                if (p->p_slptime > 1)
                                        updatepri(p);
                                p->p_slptime = 0;
                                p->p_stat = SRUN;
                                if (p->p_sflag & PS_INMEM) {
                                        setrunqueue(p);
                                        maybe_resched(p);
                                        break;
                                } else {
                                        p->p_sflag |= PS_SWAPINREQ;
                                        wakeup((caddr_t)&proc0);
                                }
                                /* END INLINE EXPANSION */
                        }
                }
        }
        mtx_unlock_spin(&sched_lock);
}
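
/*
 * Editorial note (not part of the original file): the producer half of
 * the msleep() sketch shown earlier.  wakeup() makes every process
 * sleeping on the channel runnable; wakeup_one() stops after waking one
 * in-memory sleeper and suits the case where only one waiter can make
 * progress.  The foo_* names are hypothetical.
 */
#if 0
static void
example_foo_post(void)
{
        mtx_lock(&foo_mtx);
        foo_ready = 1;			/* establish the predicate first */
        wakeup(&foo_ready);		/* or wakeup_one(&foo_ready) */
        mtx_unlock(&foo_mtx);
}
#endif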

/*
 * The machine independent parts of mi_switch().
 */
void
mi_switch()
{
        struct timeval new_switchtime;
        register struct proc *p = curproc;	/* XXX */
#if 0
        register struct rlimit *rlim;
#endif
        critical_t sched_crit;
        u_int sched_nest;

        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);

        /*
         * Compute the amount of time during which the current
         * process was running, and add that to its total so far.
         */
        microuptime(&new_switchtime);
        if (timevalcmp(&new_switchtime, PCPU_PTR(switchtime), <)) {
#if 0
                /* XXX: This doesn't play well with sched_lock right now. */
                printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
                    PCPU_GET(switchtime.tv_sec), PCPU_GET(switchtime.tv_usec),
                    new_switchtime.tv_sec, new_switchtime.tv_usec);
#endif
                new_switchtime = PCPU_GET(switchtime);
        } else {
                p->p_runtime += (new_switchtime.tv_usec - PCPU_GET(switchtime.tv_usec)) +
                    (new_switchtime.tv_sec - PCPU_GET(switchtime.tv_sec)) *
                    (int64_t)1000000;
        }

#if 0
        /*
         * Check if the process exceeds its cpu resource allocation.
         * If over max, kill it.
         *
         * XXX drop sched_lock, pickup Giant
         */
        if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
            p->p_runtime > p->p_limit->p_cpulimit) {
                rlim = &p->p_rlimit[RLIMIT_CPU];
                if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
                        mtx_unlock_spin(&sched_lock);
                        PROC_LOCK(p);
                        killproc(p, "exceeded maximum CPU limit");
                        mtx_lock_spin(&sched_lock);
                        PROC_UNLOCK_NOSWITCH(p);
                } else {
                        mtx_unlock_spin(&sched_lock);
                        PROC_LOCK(p);
                        psignal(p, SIGXCPU);
                        mtx_lock_spin(&sched_lock);
                        PROC_UNLOCK_NOSWITCH(p);
                        if (rlim->rlim_cur < rlim->rlim_max) {
                                /* XXX: we should make a private copy */
                                rlim->rlim_cur += 5;
                        }
                }
        }
#endif

        /*
         * Pick a new current process and record its start time.
         */
        cnt.v_swtch++;
        PCPU_SET(switchtime, new_switchtime);
        CTR3(KTR_PROC, "mi_switch: old proc %p (pid %d, %s)", p, p->p_pid,
            p->p_comm);
        sched_crit = sched_lock.mtx_savecrit;
        sched_nest = sched_lock.mtx_recurse;
        curproc->p_lastcpu = curproc->p_oncpu;
        curproc->p_oncpu = NOCPU;
        clear_resched(curproc);
        cpu_switch();
        curproc->p_oncpu = PCPU_GET(cpuid);
        sched_lock.mtx_savecrit = sched_crit;
        sched_lock.mtx_recurse = sched_nest;
        sched_lock.mtx_lock = (uintptr_t)curproc;
        CTR3(KTR_PROC, "mi_switch: new proc %p (pid %d, %s)", p, p->p_pid,
            p->p_comm);
        if (PCPU_GET(switchtime.tv_sec) == 0)
                microuptime(PCPU_PTR(switchtime));
        PCPU_SET(switchticks, ticks);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
        register struct proc *p;
{

        mtx_lock_spin(&sched_lock);
        switch (p->p_stat) {
        case 0:
        case SRUN:
        case SZOMB:
        case SWAIT:
        default:
                panic("setrunnable");
        case SSTOP:
        case SSLEEP:			/* e.g. when sending signals */
                if (p->p_sflag & PS_CVWAITQ)
                        cv_waitq_remove(p);
                else
                        unsleep(p);
                break;

        case SIDL:
                break;
        }
        p->p_stat = SRUN;
        if (p->p_slptime > 1)
                updatepri(p);
        p->p_slptime = 0;
        if ((p->p_sflag & PS_INMEM) == 0) {
                p->p_sflag |= PS_SWAPINREQ;
                wakeup((caddr_t)&proc0);
        } else {
                setrunqueue(p);
                maybe_resched(p);
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
        register struct proc *p;
{
        register unsigned int newpriority;

        mtx_lock_spin(&sched_lock);
        if (p->p_pri.pri_class == PRI_TIMESHARE) {
                newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT +
                    NICE_WEIGHT * (p->p_nice - PRIO_MIN);
                newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
                    PRI_MAX_TIMESHARE);
                p->p_pri.pri_user = newpriority;
        }
        maybe_resched(p);
        mtx_unlock_spin(&sched_lock);
}
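
/*
 * Editorial note (not part of the original file): a worked instance of
 * the time-share formula above, using illustrative values (the real
 * constants live in the machine/priority headers).  Assuming PUSER = 160,
 * INVERSE_ESTCPU_WEIGHT = 8 and NICE_WEIGHT = 1, a nice-0 process
 * (p_nice - PRIO_MIN = 20) with p_estcpu = 80 gets
 *
 *	newpriority = 160 + 80 / 8 + 1 * 20 = 190
 *
 * clamped to [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].  Every
 * INVERSE_ESTCPU_WEIGHT ticks of accumulated CPU thus cost one priority
 * level, which is why schedclock() below calls resetpriority() only
 * when p_estcpu crosses such a boundary.
 */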

/* ARGSUSED */
static void
sched_setup(dummy)
        void *dummy;
{

        callout_init(&schedcpu_callout, 1);
        callout_init(&roundrobin_callout, 0);

        /* Kick off timeout driven events by calling first time. */
        roundrobin(NULL);
        schedcpu(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
schedclock(p)
        struct proc *p;
{

        p->p_cpticks++;
        p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
        if ((p->p_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
                resetpriority(p);
                if (p->p_pri.pri_level >= PUSER)
                        p->p_pri.pri_level = p->p_pri.pri_user;
        }
}

/*
 * General purpose yield system call.
 */
int
yield(struct proc *p, struct yield_args *uap)
{

        p->p_retval[0] = 0;

        mtx_lock_spin(&sched_lock);
        DROP_GIANT_NOSWITCH();
        p->p_pri.pri_level = PRI_MAX_TIMESHARE;
        setrunqueue(p);
        p->p_stats->p_ru.ru_nvcsw++;
        mi_switch();
        mtx_unlock_spin(&sched_lock);
        PICKUP_GIANT();

        return (0);
}