/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_synch.c        8.9 (Berkeley) 5/19/95
 * $FreeBSD$
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int     hogticks;
int     lbolt;
int     sched_quantum;          /* Roundrobin scheduling quantum in ticks. */

static struct callout loadav_callout;
static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

struct loadavg averunnable =
        { {0, 0, 0}, FSCALE };  /* load average, of runnable procs */

/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
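/*
 * Explanatory note (added here, not in the original comment): the values
 * below follow from that 5 second sampling interval.  An average with a
 * period of T seconds decays by exp(-5/T) per sample, giving
 * exp(-5/60) = exp(-1/12), exp(-5/300) = exp(-1/60) and
 * exp(-5/900) = exp(-1/180) for the 1, 5 and 15 minute averages.
 * See the update loop in loadav() below for how they are applied.
 */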
static fixpt_t cexp[3] = {
        0.9200444146293232 * FSCALE,    /* exp(-1/12) */
        0.9834714538216174 * FSCALE,    /* exp(-1/60) */
        0.9944598480048967 * FSCALE,    /* exp(-1/180) */
};

static void     endtsleep(void *);
static void     loadav(void *arg);
static void     roundrobin(void *arg);
static void     schedcpu(void *arg);

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;

        new_val = sched_quantum * tick;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < tick)
                return (EINVAL);
        sched_quantum = new_val / tick;
        hogticks = 2 * sched_quantum;
        return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
        0, sizeof sched_quantum, sysctl_kern_quantum, "I",
        "Roundrobin scheduling quantum in microseconds");

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
void
maybe_resched(struct thread *td)
{

        mtx_assert(&sched_lock, MA_OWNED);
        if (td->td_priority < curthread->td_priority)
                curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

int
roundrobin_interval(void)
{
        return (sched_quantum);
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(arg)
        void *arg;
{

#ifdef SMP
        mtx_lock_spin(&sched_lock);
        forward_roundrobin();
        mtx_unlock_spin(&sched_lock);
#endif

        callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *      90% of (p_estcpu) usage in 5 * loadav time
 *      95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *      for (i = 0; i < (5 * loadavg); i++)
 *              p_estcpu *= decay;
 * will compute
 *      p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *      decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *      decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *      decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *      b = 2 * loadavg
 * then
 *      decay = b / (b + 1)
 *
 * We now need to prove two things:
 *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *      2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *      For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *      For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *      ln(.1) =~ -2.30
 *
 * Proof of (1):
 *      Solve (factor)**(power) =~ .1 given power (5*loadav):
 *              solving for factor,
 *              ln(factor) =~ (-2.30/5*loadav), or
 *              factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *                  exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *      Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *              solving for power,
 *              power*ln(b/(b+1)) =~ -2.30, or
 *              power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

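/*
 * Worked example (added for illustration, not part of the original
 * derivation): with loadavg == 2, b = 2 * loadavg = 4 and
 * decay = 4 / 5 = 0.8.  Applying p_estcpu *= 0.8 once a second for
 * 5 * loadavg = 10 seconds leaves 0.8**10 =~ 0.107 of the original
 * value, i.e. roughly 90% of the estimate has been forgotten, as
 * claimed above.
 */
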
/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define loadfactor(loadav)      (2 * (loadav))
#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE;        /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define CCPU_SHIFT      11

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(arg)
        void *arg;
{
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        struct thread *td;
        struct proc *p;
        struct kse *ke;
        struct ksegrp *kg;
        int realstathz;
        int awake;

        realstathz = stathz ? stathz : hz;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                mtx_lock_spin(&sched_lock);
                p->p_swtime++;
                FOREACH_KSEGRP_IN_PROC(p, kg) {
                        awake = 0;
                        FOREACH_KSE_IN_GROUP(kg, ke) {
                                /*
                                 * Increment time in/out of memory and sleep
                                 * time (if sleeping).  We ignore overflow;
                                 * with 16-bit int's (remember them?)
                                 * overflow takes 45 days.
                                 */
                                /*
                                 * The kse slptimes are not touched in wakeup
                                 * because the thread may not HAVE a KSE.
                                 */
                                if ((ke->ke_state == KES_ONRUNQ) ||
                                    ((ke->ke_state == KES_THREAD) &&
                                    (ke->ke_thread->td_state == TDS_RUNNING))) {
                                        ke->ke_slptime = 0;
                                        awake = 1;
                                } else {
                                        /* XXXKSE
                                         * This is probably a pointless
                                         * statistic in a KSE world.
                                         */
                                        ke->ke_slptime++;
                                }

                                /*
                                 * pctcpu is only for ps?
                                 * Do it per kse.. and add them up at the end?
                                 * XXXKSE
                                 */
                                ke->ke_pctcpu
                                    = (ke->ke_pctcpu * ccpu) >> FSHIFT;
                                /*
                                 * If the kse has been idle the entire second,
                                 * stop recalculating its priority until
                                 * it wakes up.
                                 */
                                if (ke->ke_slptime > 1) {
                                        continue;
                                }

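                                /*
                                 * Fold the statclock ticks accumulated over
                                 * the last second into the decaying %CPU
                                 * estimate: the first form below is the
                                 * faster shift-based formula described
                                 * above, the second is the general
                                 * (FSCALE - ccpu) weighting.
                                 */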
#if     (FSHIFT >= CCPU_SHIFT)
                                ke->ke_pctcpu += (realstathz == 100) ?
                                    ((fixpt_t) ke->ke_cpticks) <<
                                    (FSHIFT - CCPU_SHIFT) :
                                    100 * (((fixpt_t) ke->ke_cpticks) <<
                                    (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
                                ke->ke_pctcpu += ((FSCALE - ccpu) *
                                    (ke->ke_cpticks * FSCALE / realstathz)) >>
                                    FSHIFT;
#endif
                                ke->ke_cpticks = 0;
                        } /* end of kse loop */
                        /*
                         * If there are ANY running threads in this KSEGRP,
                         * then don't count it as sleeping.
                         */
                        if (awake == 0) {
                                kg->kg_slptime++;
                        } else {
                                kg->kg_slptime = 0;
                        }
                        kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
                        resetpriority(kg);
                        FOREACH_THREAD_IN_GROUP(kg, td) {
                                int changedqueue;
                                if (td->td_priority >= PUSER) {
                                        /*
                                         * Only change the priority
                                         * of threads that are still at their
                                         * user priority.
                                         * XXXKSE This is problematic
                                         * as we may need to re-order
                                         * the threads on the KSEG list.
                                         */
                                        changedqueue =
                                            ((td->td_priority / RQ_PPQ) !=
                                            (kg->kg_user_pri / RQ_PPQ));

                                        td->td_priority = kg->kg_user_pri;
                                        if (changedqueue &&
                                            td->td_state == TDS_RUNQ) {
                                                /* this could be optimised */
                                                remrunqueue(td);
                                                td->td_priority =
                                                    kg->kg_user_pri;
                                                setrunqueue(td);
                                        } else {
                                                td->td_priority = kg->kg_user_pri;
                                        }
                                }
                        }
                } /* end of ksegrp loop */
                mtx_unlock_spin(&sched_lock);
        } /* end of process loop */
        sx_sunlock(&allproc_lock);
        wakeup(&lbolt);
        callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(td)
        register struct thread *td;
{
        register struct ksegrp *kg;
        register unsigned int newcpu;
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

        if (td == NULL)
                return;
        kg = td->td_ksegrp;
        newcpu = kg->kg_estcpu;
        if (kg->kg_slptime > 5 * loadfac)
                kg->kg_estcpu = 0;
        else {
                kg->kg_slptime--;       /* the first time was done in schedcpu */
                while (newcpu && --kg->kg_slptime)
                        newcpu = decay_cpu(loadfac, newcpu);
                kg->kg_estcpu = newcpu;
        }
        resetpriority(td->td_ksegrp);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE       128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)       (((intptr_t)(x) >> 8) & (TABLESIZE - 1))

void
sleepinit(void)
{
        int i;

        sched_quantum = hz/10;
        hogticks = 2 * sched_quantum;
        for (i = 0; i < TABLESIZE; i++)
                TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */

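/*
 * Typical usage (an illustrative sketch added here; "foo_mtx", "foo_ready"
 * and the wait message are made-up names, not identifiers from this file):
 *
 *      mtx_lock(&foo_mtx);
 *      while (!foo_ready) {
 *              error = msleep(&foo_ready, &foo_mtx, PZERO | PCATCH,
 *                  "foowt", 0);
 *              if (error != 0)
 *                      break;
 *      }
 *      mtx_unlock(&foo_mtx);
 *
 * The condition is re-tested after every return because another thread may
 * have consumed the event between the wakeup() and the return of msleep(),
 * and because PCATCH allows an early return with EINTR or ERESTART.
 */
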
int
msleep(ident, mtx, priority, wmesg, timo)
        void *ident;
        struct mtx *mtx;
        int priority, timo;
        const char *wmesg;
{
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        int sig, catch = priority & PCATCH;
        int rval = 0;
        WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
        if (KTRPOINT(td, KTR_CSW))
                ktrcsw(1, 0);
#endif
        WITNESS_SLEEP(0, &mtx->mtx_object);
        KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
            ("sleeping without a mutex"));
        /*
         * If we are capable of async syscalls and there isn't already
         * another one ready to return, start a new thread
         * and queue it as ready to run. Note that there is danger here
         * because we need to make sure that we don't sleep allocating
         * the thread (recursion here might be bad).
         * Hence the TDF_INMSLEEP flag.
         */
        if (p->p_flag & P_KSES) {
                /*
                 * Just don't bother if we are exiting
                 * and not the exiting thread.
                 */
                if ((p->p_flag & P_WEXIT) && catch && p->p_singlethread != td)
                        return (EINTR);
                if (td->td_mailbox && (!(td->td_flags & TDF_INMSLEEP))) {
                        /*
                         * If we have no queued work to do, then
                         * upcall to the UTS to see if it has more to do.
                         * We don't need to upcall now, just make it and
                         * queue it.
                         */
                        mtx_lock_spin(&sched_lock);
                        if (TAILQ_FIRST(&td->td_ksegrp->kg_runq) == NULL) {
                                /* Don't recurse here! */
                                td->td_flags |= TDF_INMSLEEP;
                                thread_schedule_upcall(td, td->td_kse);
                                td->td_flags &= ~TDF_INMSLEEP;
                        }
                        mtx_unlock_spin(&sched_lock);
                }
        }
        mtx_lock_spin(&sched_lock);
        if (cold) {
                /*
                 * During autoconfiguration, just give interrupts
                 * a chance, then just return.
                 * Don't run any other procs or panic below,
                 * in case this is the idle process and already asleep.
                 */
                if (mtx != NULL && priority & PDROP)
                        mtx_unlock(mtx);
                mtx_unlock_spin(&sched_lock);
                return (0);
        }

        DROP_GIANT();

        if (mtx != NULL) {
                mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
                WITNESS_SAVE(&mtx->mtx_object, mtx);
                mtx_unlock(mtx);
                if (priority & PDROP)
                        mtx = NULL;
        }

        KASSERT(p != NULL, ("msleep1"));
        KASSERT(ident != NULL && td->td_state == TDS_RUNNING, ("msleep"));

        td->td_wchan = ident;
        td->td_wmesg = wmesg;
        td->td_kse->ke_slptime = 0;     /* XXXKSE */
        td->td_ksegrp->kg_slptime = 0;
        td->td_priority = priority & PRIMASK;
        CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
            td, p->p_pid, p->p_comm, wmesg, ident);
        TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
        if (timo)
                callout_reset(&td->td_slpcallout, timo, endtsleep, td);
        /*
         * We put ourselves on the sleep queue and start our timeout
         * before calling thread_suspend_check, as we could stop there, and
         * a wakeup or a SIGCONT (or both) could occur while we were stopped
         * without resuming us, thus we must be ready for sleep
         * when cursig is called.  If the wakeup happens while we're
         * stopped, td->td_wchan will be 0 upon return from cursig.
         */
        if (catch) {
                CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
                    p->p_pid, p->p_comm);
                td->td_flags |= TDF_SINTR;
                mtx_unlock_spin(&sched_lock);
                PROC_LOCK(p);
                sig = cursig(td);
                if (sig == 0 && thread_suspend_check(1))
                        sig = SIGSTOP;
                mtx_lock_spin(&sched_lock);
                PROC_UNLOCK(p);
                if (sig != 0) {
                        if (td->td_wchan != NULL)
                                unsleep(td);
                } else if (td->td_wchan == NULL)
                        catch = 0;
        } else
                sig = 0;
        if (td->td_wchan != NULL) {
                p->p_stats->p_ru.ru_nvcsw++;
                td->td_state = TDS_SLP;
                mi_switch();
        }
        CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
            p->p_comm);
        KASSERT(td->td_state == TDS_RUNNING, ("running but not TDS_RUNNING"));
        td->td_flags &= ~TDF_SINTR;
        if (td->td_flags & TDF_TIMEOUT) {
                td->td_flags &= ~TDF_TIMEOUT;
                if (sig == 0)
                        rval = EWOULDBLOCK;
        } else if (td->td_flags & TDF_TIMOFAIL) {
                td->td_flags &= ~TDF_TIMOFAIL;
        } else if (timo && callout_stop(&td->td_slpcallout) == 0) {
                /*
                 * This isn't supposed to be pretty.  If we are here, then
                 * the endtsleep() callout is currently executing on another
                 * CPU and is either spinning on the sched_lock or will be
                 * soon.  If we don't synchronize here, there is a chance
                 * that this process may msleep() again before the callout
                 * has a chance to run and the callout may end up waking up
                 * the wrong msleep().  Yuck.
                 */
                td->td_flags |= TDF_TIMEOUT;
                td->td_state = TDS_SLP;
                p->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
        }
        mtx_unlock_spin(&sched_lock);

        if (rval == 0 && catch) {
                PROC_LOCK(p);
                /* XXX: shouldn't we always be calling cursig()? */
                if (sig != 0 || (sig = cursig(td))) {
                        if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
                                rval = EINTR;
                        else
                                rval = ERESTART;
                }
                PROC_UNLOCK(p);
        }
#ifdef KTRACE
        if (KTRPOINT(td, KTR_CSW))
                ktrcsw(0, 0);
#endif
        PICKUP_GIANT();
        if (mtx != NULL) {
                mtx_lock(mtx);
                WITNESS_RESTORE(&mtx->mtx_object, mtx);
        }
        return (rval);
}

/*
 * Implement timeout for msleep().
 *
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(arg)
        void *arg;
{
        register struct thread *td = arg;

        CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)", td,
            td->td_proc->p_pid, td->td_proc->p_comm);
        mtx_lock_spin(&sched_lock);
        /*
         * This is the other half of the synchronization with msleep()
         * described above.  If the TDF_TIMEOUT flag is set, we lost the
         * race and just need to put the process back on the runqueue.
         */
        if ((td->td_flags & TDF_TIMEOUT) != 0) {
                td->td_flags &= ~TDF_TIMEOUT;
                setrunqueue(td);
        } else if (td->td_wchan != NULL) {
                if (td->td_state == TDS_SLP)    /* XXXKSE */
                        setrunnable(td);
                else
                        unsleep(td);
                td->td_flags |= TDF_TIMEOUT;
        } else {
                td->td_flags |= TDF_TIMOFAIL;
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread, as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 * This is about identical to cv_abort().
 * Think about merging them?
 * Also, whatever the signal code does...
 */
void
abortsleep(struct thread *td)
{

        mtx_lock_spin(&sched_lock);
        /*
         * If the TDF_TIMEOUT flag is set, just leave. A
         * timeout is scheduled anyhow.
         */
        if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
                if (td->td_wchan != NULL) {
                        if (td->td_state == TDS_SLP) {  /* XXXKSE */
                                setrunnable(td);
                        } else {
                                /*
                                 * Probably in a suspended state..
                                 * um.. dunno XXXKSE
                                 */
                                unsleep(td);
                        }
                }
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{

        mtx_lock_spin(&sched_lock);
        if (td->td_wchan != NULL) {
                TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
                td->td_wchan = NULL;
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
        register void *ident;
{
        register struct slpquehead *qp;
        register struct thread *td;
        struct thread *ntd;
        struct proc *p;

        mtx_lock_spin(&sched_lock);
        qp = &slpque[LOOKUP(ident)];
restart:
        for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
                ntd = TAILQ_NEXT(td, td_slpq);
                p = td->td_proc;
                if (td->td_wchan == ident) {
                        TAILQ_REMOVE(qp, td, td_slpq);
                        td->td_wchan = NULL;
                        if (td->td_state == TDS_SLP) {
                                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                                CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
                                    td, p->p_pid, p->p_comm);
                                if (td->td_ksegrp->kg_slptime > 1)
                                        updatepri(td);
                                td->td_ksegrp->kg_slptime = 0;
                                if (p->p_sflag & PS_INMEM) {
                                        setrunqueue(td);
                                        maybe_resched(td);
                                } else {
                                        /* XXXKSE Wrong! */
                                        td->td_state = TDS_RUNQ;
                                        p->p_sflag |= PS_SWAPINREQ;
                                        wakeup(&proc0);
                                }
                                /* END INLINE EXPANSION */
                        }
                        goto restart;
                }
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
        register void *ident;
{
        register struct slpquehead *qp;
        register struct thread *td;
        register struct proc *p;
        struct thread *ntd;

        mtx_lock_spin(&sched_lock);
        qp = &slpque[LOOKUP(ident)];
restart:
        for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
                ntd = TAILQ_NEXT(td, td_slpq);
                p = td->td_proc;
                if (td->td_wchan == ident) {
                        TAILQ_REMOVE(qp, td, td_slpq);
                        td->td_wchan = NULL;
                        if (td->td_state == TDS_SLP) {
                                /* OPTIMIZED EXPANSION OF setrunnable(p); */
                                CTR3(KTR_PROC, "wakeup1: thread %p (pid %d, %s)",
                                    td, p->p_pid, p->p_comm);
                                if (td->td_ksegrp->kg_slptime > 1)
                                        updatepri(td);
                                td->td_ksegrp->kg_slptime = 0;
                                if (p->p_sflag & PS_INMEM) {
                                        setrunqueue(td);
                                        maybe_resched(td);
                                        break;
                                } else {
                                        /* XXXKSE Wrong */
                                        td->td_state = TDS_RUNQ;
                                        p->p_sflag |= PS_SWAPINREQ;
                                        wakeup(&proc0);
                                }
                                /* END INLINE EXPANSION */
                                goto restart;
                        }
                }
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * The machine independent parts of mi_switch().
 */
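/*
 * Overview (descriptive note added for clarity): the function below charges
 * the outgoing thread's run time to its process, requeues the thread or
 * releases its KSE as appropriate, and calls cpu_switch() to resume the
 * next thread; the td_switchin hook then runs in the context of the thread
 * being switched in.
 */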
void
mi_switch()
{
        struct bintime new_switchtime;
        struct thread *td = curthread;  /* XXX */
        struct proc *p = td->td_proc;   /* XXX */
        struct kse *ke = td->td_kse;
#if 0
        register struct rlimit *rlim;
#endif
        u_int sched_nest;

        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
        KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
#ifdef INVARIANTS
        if (td->td_state != TDS_MTX &&
            td->td_state != TDS_RUNQ &&
            td->td_state != TDS_RUNNING)
                mtx_assert(&Giant, MA_NOTOWNED);
#endif
        KASSERT(td->td_critnest == 1,
            ("mi_switch: switch in a critical section"));

        /*
         * Compute the amount of time during which the current
         * process was running, and add that to its total so far.
         */
        binuptime(&new_switchtime);
        bintime_add(&p->p_runtime, &new_switchtime);
        bintime_sub(&p->p_runtime, PCPU_PTR(switchtime));

#ifdef DDB
        /*
         * Don't perform context switches from the debugger.
         */
        if (db_active) {
                mtx_unlock_spin(&sched_lock);
                db_error("Context switches not allowed in the debugger.");
        }
#endif

#if 0
        /*
         * Check if the process exceeds its cpu resource allocation.
         * If over max, kill it.
         *
         * XXX drop sched_lock, pickup Giant
         */
        if (p->p_state != PRS_ZOMBIE &&
            p->p_limit->p_cpulimit != RLIM_INFINITY &&
            p->p_runtime > p->p_limit->p_cpulimit) {
                rlim = &p->p_rlimit[RLIMIT_CPU];
                if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) {
                        mtx_unlock_spin(&sched_lock);
                        PROC_LOCK(p);
                        killproc(p, "exceeded maximum CPU limit");
                        mtx_lock_spin(&sched_lock);
                        PROC_UNLOCK(p);
                } else {
                        mtx_unlock_spin(&sched_lock);
                        PROC_LOCK(p);
                        psignal(p, SIGXCPU);
                        mtx_lock_spin(&sched_lock);
                        PROC_UNLOCK(p);
                        if (rlim->rlim_cur < rlim->rlim_max) {
                                /* XXX: we should make a private copy */
                                rlim->rlim_cur += 5;
                        }
                }
        }
#endif

        /*
         * Pick a new current process and record its start time.
         */
        cnt.v_swtch++;
        PCPU_SET(switchtime, new_switchtime);
        CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
            p->p_comm);
        sched_nest = sched_lock.mtx_recurse;
        td->td_lastcpu = ke->ke_oncpu;
        ke->ke_oncpu = NOCPU;
        ke->ke_flags &= ~KEF_NEEDRESCHED;
        /*
         * At the last moment: if this KSE is not on the run queue,
         * it needs to be freed correctly and the thread treated accordingly.
         */
        if ((td->td_state == TDS_RUNNING) &&
            ((ke->ke_flags & KEF_IDLEKSE) == 0)) {
                /* Put us back on the run queue (kse and all). */
                setrunqueue(td);
        } else if ((td->td_flags & TDF_UNBOUND) &&
            (td->td_state != TDS_RUNQ)) {       /* in case of old code */
                /*
                 * We will not be on the run queue.
                 * Someone else can use the KSE if they need it.
                 */
                td->td_kse = NULL;
                kse_reassign(ke);
        }
        cpu_switch();
        td->td_kse->ke_oncpu = PCPU_GET(cpuid);
        sched_lock.mtx_recurse = sched_nest;
        sched_lock.mtx_lock = (uintptr_t)td;
        CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
            p->p_comm);
        if (PCPU_GET(switchtime.sec) == 0)
                binuptime(PCPU_PTR(switchtime));
        PCPU_SET(switchticks, ticks);

        /*
         * Call the switchin function while still holding the scheduler lock
         * (used by the idlezero code and the general page-zeroing code)
         */
        if (td->td_switchin)
                td->td_switchin();
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
        struct proc *p = td->td_proc;

        mtx_assert(&sched_lock, MA_OWNED);
        switch (p->p_state) {
        case PRS_ZOMBIE:
                panic("setrunnable(1)");
        default:
                break;
        }
        switch (td->td_state) {
        case 0:
        case TDS_RUNNING:
        case TDS_IWAIT:
        default:
                printf("state is %d", td->td_state);
                panic("setrunnable(2)");
        case TDS_SUSPENDED:
                thread_unsuspend(p);
                break;
        case TDS_SLP:                   /* e.g. when sending signals */
                if (td->td_flags & TDF_CVWAITQ)
                        cv_waitq_remove(td);
                else
                        unsleep(td);
        case TDS_UNQUEUED:  /* being put back onto the queue */
        case TDS_NEW:   /* not yet had time to suspend */
        case TDS_RUNQ:  /* not yet had time to suspend */
                break;
        }
        if (td->td_ksegrp->kg_slptime > 1)
                updatepri(td);
        td->td_ksegrp->kg_slptime = 0;
        if ((p->p_sflag & PS_INMEM) == 0) {
                td->td_state = TDS_RUNQ; /* XXXKSE not a good idea */
                p->p_sflag |= PS_SWAPINREQ;
                wakeup(&proc0);
        } else {
                if (td->td_state != TDS_RUNQ)
                        setrunqueue(td);        /* XXXKSE */
                maybe_resched(td);
        }
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(kg)
        register struct ksegrp *kg;
{
        register unsigned int newpriority;
        struct thread *td;

        mtx_lock_spin(&sched_lock);
        if (kg->kg_pri_class == PRI_TIMESHARE) {
                newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
                    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
                newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
                    PRI_MAX_TIMESHARE);
                kg->kg_user_pri = newpriority;
        }
        FOREACH_THREAD_IN_GROUP(kg, td) {
                maybe_resched(td);      /* XXXKSE silly */
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
 */
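/*
 * Explanatory note (added here): the update in the loop below is the usual
 * exponential moving average computed in fixed point,
 *
 *      ldavg' = (cexp * ldavg + nrun * FSCALE * (FSCALE - cexp)) >> FSHIFT
 *
 * which corresponds to ldavg' = ldavg * cexp + nrun * (1 - cexp) for each
 * of the three averaging intervals defined by cexp[] above.
 */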
static void
loadav(void *arg)
{
        int i, nrun;
        struct loadavg *avg;
        struct proc *p;
        struct thread *td;

        avg = &averunnable;
        sx_slock(&allproc_lock);
        nrun = 0;
        FOREACH_PROC_IN_SYSTEM(p) {
                FOREACH_THREAD_IN_PROC(p, td) {
                        switch (td->td_state) {
                        case TDS_RUNQ:
                        case TDS_RUNNING:
                                if ((p->p_flag & P_NOLOAD) != 0)
                                        goto nextproc;
                                nrun++; /* XXXKSE */
                        default:
                                break;
                        }
nextproc:
                        continue;
                }
        }
        sx_sunlock(&allproc_lock);
        for (i = 0; i < 3; i++)
                avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
                    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

        /*
         * Schedule the next update to occur after 5 seconds, but add a
         * random variation to avoid synchronisation with processes that
         * run at regular intervals.
         */
        callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
            loadav, NULL);
}

/* ARGSUSED */
static void
sched_setup(dummy)
        void *dummy;
{

        callout_init(&schedcpu_callout, 1);
        callout_init(&roundrobin_callout, 0);
        callout_init(&loadav_callout, 0);

        /* Kick off timeout driven events by calling first time. */
        roundrobin(NULL);
        schedcpu(NULL);
        loadav(NULL);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
schedclock(td)
        struct thread *td;
{
        struct kse *ke;
        struct ksegrp *kg;

        KASSERT((td != NULL), ("schedclock: null thread pointer"));
        ke = td->td_kse;
        kg = td->td_ksegrp;
        ke->ke_cpticks++;
        kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
        if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
                resetpriority(kg);
                if (td->td_priority >= PUSER)
                        td->td_priority = kg->kg_user_pri;
        }
}

/*
 * General purpose yield system call.
 */
int
yield(struct thread *td, struct yield_args *uap)
{
        struct ksegrp *kg = td->td_ksegrp;

        mtx_assert(&Giant, MA_NOTOWNED);
        mtx_lock_spin(&sched_lock);
        td->td_priority = PRI_MAX_TIMESHARE;
        kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
        mi_switch();
        mtx_unlock_spin(&sched_lock);
        td->td_retval[0] = 0;

        return (0);
}