/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

static void	roundrobin(void *arg);
static void	schedcpu(void *arg);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct ksegrp *kg);
static void	resetpriority(struct ksegrp *kg);

SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;
SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I",
	"Roundrobin scheduling quantum in microseconds");

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */
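
/*
 * Illustrative sanity check of the derivation above (editorial note, not
 * used by the code): with loadav = 1 we get b = 2 and decay = b/(b+1) = 2/3.
 * Raising that to the power 5.68 from the table gives
 *	(2/3)**5.68 = exp(5.68 * ln(2/3)) =~ exp(-2.30) =~ .1,
 * so roughly 90% of p_estcpu is indeed forgotten after about 5 * loadav
 * applications of the decay, as claimed.
 */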

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
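
/*
 * Back-of-the-envelope check of the `ccpu' constant (editorial note, not
 * used by the code): schedcpu() below multiplies ke_pctcpu by ccpu once per
 * second, so after 60 seconds of idleness the old value has been scaled by
 *	exp(-1/20)**60 = exp(-3) =~ 0.05,
 * which is exactly the "95% of p_pctcpu in 60 seconds" decay quoted above.
 */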

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int realstathz;
	int awake;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		mtx_lock_spin(&sched_lock);
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment time in/out of memory and sleep
				 * time (if sleeping).  We ignore overflow;
				 * with 16-bit int's (remember them?)
				 * overflow takes 45 days.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				} else if ((ke->ke_state == KES_THREAD) &&
				    (TD_IS_RUNNING(ke->ke_thread))) {
					awake = 1;
					/* Do not clear KEF_DIDRUN */
				} else if (ke->ke_flags & KEF_DIDRUN) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				}

				/*
				 * pctcpu is only for ps?
				 * Do it per kse.. and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu
				    = (ke->ke_pctcpu * ccpu) >> FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_cpticks == 0)
					continue;
#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100) ?
				    ((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_cpticks * FSCALE / realstathz)) >>
				    FSHIFT;
#endif
				ke->ke_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake) {
				if (kg->kg_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(kg);
				}
				kg->kg_slptime = 0;
			} else {
				kg->kg_slptime++;
			}
			if (kg->kg_slptime > 1)
				continue;
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				if (td->td_priority >= PUSER) {
					sched_prio(td, kg->kg_user_pri);
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
	wakeup(&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
	register unsigned int newcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	newcpu = kg->kg_estcpu;
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		kg->kg_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(kg);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;
	struct thread *td;

	mtx_lock_spin(&sched_lock);
	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);	/* XXXKSE silly */
	}
	mtx_unlock_spin(&sched_lock);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&schedcpu_callout, 1);
	callout_init(&roundrobin_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
}

/* External interfaces start here */
int
sched_runnable(void)
{
	return runq_check(&runq);
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}
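
/*
 * For reference (derived from the defaults above, no new tunables are
 * introduced): SCHED_QUANTUM is hz / 10 ticks, i.e. 100 ms of wall clock
 * regardless of hz, so roundrobin() fires ten times per second and hogticks
 * defaults to two quanta (200 ms).  The kern.quantum sysctl reports the
 * same quantum as sched_quantum * tick microseconds (100000 by default);
 * new settings are rejected if smaller than one tick and are otherwise
 * rounded down to a whole number of ticks.
 */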

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;

	KASSERT((td != NULL), ("schedclock: null thread pointer"));
	ke = td->td_kse;
	kg = td->td_ksegrp;
	ke->ke_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}
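
/*
 * A rough picture of the feedback loop above (follows directly from
 * sched_clock() and resetpriority(); no new constants are introduced):
 * each statclock tick spent running adds one to kg_estcpu, and every
 * INVERSE_ESTCPU_WEIGHT of accumulated kg_estcpu pushes a timesharing
 * ksegrp's user priority one step further away from PUSER (larger numbers
 * are weaker priorities) until it clamps at PRI_MAX_TIMESHARE.  schedcpu()
 * pulls in the other direction once per second by scaling kg_estcpu with
 * loadfac / (loadfac + FSCALE), so a CPU hog settles at a priority that
 * tracks the current load average.
 */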

/*
 * Charge the child's scheduling cpu usage to the parent.
 *
 * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
 * Charge it to the ksegrp that did the wait since process estcpu is sum of
 * all ksegrps, this is strictly as expected.  Assume that the child process
 * aggregated all the estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct ksegrp *kg, struct ksegrp *child)
{
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
}

void
sched_fork(struct ksegrp *kg, struct ksegrp *child)
{
	/*
	 * set priority of child to be that of parent.
	 * XXXKSE this needs redefining..
	 */
	child->kg_estcpu = kg->kg_estcpu;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	kg->kg_nice = nice;
	resetpriority(kg);
}

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
void
sched_prio(struct thread *td, u_char prio)
{

	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

void
sched_sleep(struct thread *td, u_char prio)
{
	td->td_ksegrp->kg_slptime = 0;
	td->td_priority = prio;
}

void
sched_switchin(struct thread *td)
{
	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;
	struct proc *p;

	ke = td->td_kse;
	p = td->td_proc;

	KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));

	td->td_lastcpu = ke->ke_oncpu;
	td->td_last_kse = ke;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags &= ~KEF_NEEDRESCHED;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or anything else similar.
	 */
	if (TD_IS_RUNNING(td)) {
		/* Put us back on the run queue (kse and all). */
		setrunqueue(td);
	} else if (p->p_flag & P_KSES) {
		/*
		 * We will not be on the run queue.  So we must be
		 * sleeping or similar.  As it's available,
		 * someone else can use the KSE if they need it.
		 * (If bound LOANING can still occur).
		 */
		kse_reassign(ke);
	}
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;
	if (kg->kg_slptime > 1)
		updatepri(kg);
	kg->kg_slptime = 0;
	setrunqueue(td);
	maybe_resched(td);
}

void
sched_add(struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("runq_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("runq_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_add: process swapped out"));
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(&runq, ke);
}

void
sched_rem(struct kse *ke)
{
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);

	runq_remove(&runq, ke);
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
}

struct kse *
sched_choose(void)
{
	struct kse *ke;

	ke = runq_choose(&runq);

	if (ke != NULL) {
		runq_remove(&runq, ke);
		ke->ke_state = KES_THREAD;

		KASSERT((ke->ke_thread != NULL),
		    ("runq_choose: No thread on KSE"));
		KASSERT((ke->ke_thread->td_kse != NULL),
		    ("runq_choose: No KSE on thread"));
		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("runq_choose: process swapped out"));
	}
	return (ke);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}