/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 2;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * These datastructures are allocated within their parent datastructure but
 * are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
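/*
 * The generic kse, ksegrp and thread structures only carry pointers to the
 * scheduler-private structures above; the accessor macros below let the
 * rest of this file refer to those private fields through the usual
 * ke_*, kg_* and td_* names.
 */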
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;	/* Number of ticks we vol. slept */
	int	skg_runtime;	/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * This priority range has 20 priorities on either end that are reachable
 * only through nice values.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Reserved priorities for nice.
 * PRI_BASE:	The start of the dynamic range.
 * DYN_RANGE:	Number of priorities that are available in the dynamic
 *		priority range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		((SCHED_PRI_NRESV / 2) + PRI_MIN_TIMESHARE)
#define	SCHED_DYN_RANGE		(SCHED_PRI_RANGE - SCHED_PRI_NRESV)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_DYN_RANGE / SCHED_INTERACT_RANGE)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time.
 * INTERACT_RANGE:	Range of interactivity values.  Smaller is better.
 * INTERACT_HALF:	Convenience define, half of the interactivity range.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz / 10) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(10)
#define	SCHED_INTERACT_RANGE	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_RANGE / 2)
#define	SCHED_INTERACT_THRESH	(10)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determine the amount of slice granted to a scaled nice.
 */
#define	SCHED_SLICE_MIN		(slice_min)
#define	SCHED_SLICE_MAX		(slice_max)
#define	SCHED_SLICE_RANGE	(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))
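/*
 * Rough worked example of the slice macros above.  The numbers are only
 * illustrative; they assume the slice_min = hz/100 and slice_max = hz/10
 * settings made in sched_setup() with hz = 1000, and a 40-entry nice range
 * so that SCHED_PRI_NTHRESH is 19:
 *
 *	SCHED_SLICE_MIN   = 10 ticks
 *	SCHED_SLICE_MAX   = 100 ticks
 *	SCHED_SLICE_RANGE = 91 ticks
 *
 * A non-interactive kse whose scaled nice value (as computed in
 * sched_slice()) is 5 above the least nice kse on its run queue then gets
 * SCHED_SLICE_NICE(5) = 100 - (5 * 91) / 19 = 77 ticks, one 19 above gets
 * 100 - 91 = 9 ticks, and anything outside that window gets no slice at
 * all (see sched_slice()).
 */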
/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)	SCHED_INTERACTIVE(kg)
#if 0
    (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))
#endif

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	unsigned int	ksq_rslices;		/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(struct kseq *kseq);
#ifdef SMP
struct kseq * kseq_load_highest(void);
#endif

void
kseq_print(struct kseq *kseq)
{
	int i;

	if (kseq == NULL)
		kseq = KSEQ_SELF();

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}
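/*
 * kseq_add() and kseq_rem() keep the per-kseq bookkeeping in sync as KSEs
 * enter and leave the run queues: a per-class count in ksq_loads[], the
 * aggregate ksq_load, and, for TIMESHARE KSEs, the nice histogram that
 * kseq_nice_add()/kseq_nice_rem() maintain.  The histogram is indexed by
 * nice + SCHED_PRI_NHALF so that negative nice values get non-negative
 * array indices, and ksq_nicemin caches the smallest nice value currently
 * queued; sched_slice() sizes slices relative to it.
 */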
static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE,
		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 0)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	if (load > 1)
		return (KSEQ_CPU(cpu));

	return (NULL);
}
#endif

struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
	int i;

	slice_min = (hz/100);
	slice_max = (hz/10);

	mtx_lock_spin(&sched_lock);
	/* init kseqs */
	for (i = 0; i < MAXCPU; i++)
		kseq_setup(KSEQ_CPU(i));

	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
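/*
 * Rough illustration, assuming the stock PRI_MIN_TIMESHARE/PRI_MAX_TIMESHARE
 * values of 160 and 223 and a 40 priority nice reservation: SCHED_PRI_BASE
 * is then 180 and SCHED_DYN_RANGE is 24, so a ksegrp with interactivity
 * score 0 and nice 0 gets user priority 180, a score of 50 maps to
 * 180 + (50 * 24) / 100 = 192, and the nice value then shifts the result up
 * or down before it is clamped to the timeshare range.
 */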
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the ksegs nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
	 */

	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	CTR4(KTR_ULE, "Slp vs Run %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	if ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		kg->kg_runtime /= SCHED_SLP_RUN_THROTTLE;
		kg->kg_slptime /= SCHED_SLP_RUN_THROTTLE;
	}
	CTR4(KTR_ULE, "Slp vs Run(2) %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	return;
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int big;
	int small;
	int base;

	if (kg->kg_runtime > kg->kg_slptime) {
		big = kg->kg_runtime;
		small = kg->kg_slptime;
		base = SCHED_INTERACT_HALF;
	} else {
		big = kg->kg_slptime;
		small = kg->kg_runtime;
		base = 0;
	}

	big /= SCHED_INTERACT_HALF;
	if (big != 0)
		small /= big;
	else
		small = 0;

	small += base;
	/* XXX Factor in nice */
	return (small);
}
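/*
 * The score above falls in [0, SCHED_INTERACT_RANGE) and is roughly
 * 50 * runtime / slptime when a ksegrp sleeps more than it runs, and
 * 50 + 50 * slptime / runtime when it runs more than it sleeps, so smaller
 * scores mean more interactive.  For example (times in the scaled tick
 * units kept in the kg), a ksegrp that has slept for 900 ticks and run for
 * 100 scores about 5 and passes SCHED_INTERACT_THRESH, while one that has
 * run for 900 and slept for 100 scores about 55 and is treated as a CPU
 * hog.
 */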
/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 *
	 * Shift the tick count out so that the divide doesn't round away
	 * our results.
	 */
	ke->ke_ticks <<= 10;
	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
	    SCHED_CPU_TICKS;
	ke->ke_ticks >>= 10;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	if (!smp_started)
		return (0);

	load = 0;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	td->td_priority = prio;

	if (TD_ON_RUNQ(td)) {
		rq = ke->ke_runq;

		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		runq_add(ke->ke_runq, ke);
		/* setrunqueue(td); */
		return;
	}
	if (ke->ke_runq)
		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
	/*
	 * We will not be on the run queue.  So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_THREADED)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_oncpu = PCPU_GET(cpuid);

	if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
	    td->td_priority != td->td_ksegrp->kg_user_pri)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_state != KES_ONRUNQ &&
			    ke->ke_state != KES_THREAD)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}
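/*
 * sched_sleep() records the tick at which a thread went to sleep;
 * sched_wakeup() below turns the difference into sleep time, credited to
 * the ksegrp in the same "ticks << 10" fixed point form used for
 * kg_runtime, and then recomputes the user priority from the new
 * interactivity score.
 */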
void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_priority(kg);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{
	child->ke_slice = ke->ke_slice;
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/*
	 * Claim that we've been running for one second for statistical
	 * purposes.
	 */
	child->ke_ticks = 0;
	child->ke_ltick = ticks;
	child->ke_ftick = ticks - hz;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* XXX Need something better here */
	if (kg->kg_slptime > kg->kg_runtime) {
		child->kg_slptime = SCHED_DYN_RANGE;
		child->kg_runtime = kg->kg_slptime / SCHED_DYN_RANGE;
	} else {
		child->kg_runtime = SCHED_DYN_RANGE;
		child->kg_slptime = kg->kg_runtime / SCHED_DYN_RANGE;
	}

	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}
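/*
 * sched_clock() is the scheduler's periodic clock hook for the currently
 * running kse.  Each tick adds tickincr << 10 to the ksegrp's run time for
 * the interactivity calculation and consumes one tick of the kse's slice;
 * when the slice is used up, the kse is removed from its kseq, given a new
 * priority and slice, and requeued on either the current or the next
 * timeshare queue depending on whether it still looks interactive.
 */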
void
sched_clock(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct thread *td;
#if 0
	struct kse *nke;
#endif

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	td = ke->ke_thread;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_kse->ke_flags & KEF_IDLEKSE)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);

	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * Check for a higher priority task on the run queue.  This can happen
	 * on SMP if another processor woke up a process on our runq.
	 */
	kseq = KSEQ_SELF();
#if 0
	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
		if (sched_strict &&
		    nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
		else if (nke->ke_thread->td_priority <
		    td->td_priority SCHED_PRIO_SLOP)

		if (nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
	}
#endif
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;

	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		return (1);
#ifdef SMP
	/*
	 * For SMP we may steal other processor's KSEs.  Just search until we
	 * verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i))
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load)
				return (1);
		}
	}
#endif
	return (0);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

#ifdef SMP
retry:
#endif
	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}

#ifdef SMP
	if (smp_started) {
		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		if ((kseq = kseq_load_highest()) == NULL)
			return (NULL);

		/*
		 * Remove this kse from this kseq and runq and then requeue
		 * on the current processor.  Then we will dequeue it
		 * normally above.
		 */
		ke = kseq_choose(kseq);
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;
		kseq_rem(kseq, ke);

		ke->ke_cpu = PCPU_GET(cpuid);
		sched_add(ke);
		goto retry;
	}
#endif

	return (NULL);
}
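/*
 * sched_add() places a kse on the appropriate run queue for its priority
 * class: interrupt and realtime KSEs always go on the current timeshare
 * queue of this cpu with a full slice, timeshare KSEs go on either the
 * current or the next queue of their assigned cpu depending on their
 * interactivity, and idle-class KSEs normally go on the idle queue unless
 * priority propagation has raised them into the timeshare range.
 */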
void
sched_add(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));

	kg = ke->ke_ksegrp;

	if (ke->ke_runq)
		Debugger("hrm?");

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		if (ke->ke_runq == NULL)
			kseq_add(kseq, ke);
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (ke->ke_runq == NULL) {
			if (SCHED_CURR(kg, ke))
				ke->ke_runq = kseq->ksq_curr;
			else
				ke->ke_runq = kseq->ksq_next;
			kseq_add(kseq, ke);
		}
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);

		if (ke->ke_runq == NULL)
			kseq_add(kseq, ke);
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MAX_TIMESHARE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
}

void
sched_rem(struct kse *ke)
{
	struct kseq *kseq;

	mtx_assert(&sched_lock, MA_OWNED);
	/* KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); */
	panic("WTF\n");

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	if (ke->ke_ticks) {
		int rtick;

		/* Update to account for time potentially spent sleeping */
		ke->ke_ltick = ticks;
		sched_pctcpu_update(ke);

		/* How many rtick per second ? */
		rtick = ke->ke_ticks / SCHED_CPU_TIME;
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}