/*-
 * Copyright (c) 2002-2005, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <opt_sched.h>

#define	kse td_sched

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
    "Scheduler name");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * The following data structures are allocated within their parent structure
 * but are scheduler specific.
 */
/*
 * The schedulable entity that can be given a context to run.  A process may
 * have several of these.
 */
struct kse {
	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
	int		ke_flags;	/* (j) KEF_* flags. */
	struct thread	*ke_thread;	/* (*) Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_THREAD = 0x0,	/* slaved to thread state */
		KES_ONRUNQ
	} ke_state;			/* (j) thread sched specific status. */
	int		ke_slptime;
	int		ke_slice;
	struct runq	*ke_runq;
	u_char		ke_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ke_ltick;	/* Last tick that we were running on */
	int		ke_ftick;	/* First tick that we were running on */
	int		ke_ticks;	/* Tick count */

};
#define	td_kse			td_sched
#define	td_slptime		td_kse->ke_slptime
#define	ke_proc			ke_thread->td_proc
#define	ke_ksegrp		ke_thread->td_ksegrp
#define	ke_assign		ke_procq.tqe_next
/* flags kept in ke_flags */
#define	KEF_ASSIGNED	0x0001		/* Thread is being migrated. */
#define	KEF_BOUND	0x0002		/* Thread can not migrate. */
#define	KEF_XFERABLE	0x0004		/* Thread was added as transferable. */
#define	KEF_HOLD	0x0008		/* Thread is temporarily bound. */
#define	KEF_REMOVED	0x0010		/* Thread was removed while ASSIGNED */
#define	KEF_INTERNAL	0x0020		/* Thread added due to migration. */
#define	KEF_DIDRUN	0x02000		/* Thread actually ran. */
#define	KEF_EXIT	0x04000		/* Thread is being killed. */

struct kg_sched {
	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
					    /* the system scheduler */
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
	int	skg_avail_opennings;	/* (j) Num unfilled slots in group.*/
	int	skg_concurrency;	/* (j) Num threads requested in group.*/
};
#define	kg_last_assigned	kg_sched->skg_last_assigned
#define	kg_avail_opennings	kg_sched->skg_avail_opennings
#define	kg_concurrency		kg_sched->skg_concurrency
#define	kg_runtime		kg_sched->skg_runtime
#define	kg_slptime		kg_sched->skg_slptime

#define	SLOT_RELEASE(kg)	(kg)->kg_avail_opennings++
#define	SLOT_USE(kg)		(kg)->kg_avail_opennings--

static struct kse kse0;
static struct kg_sched kg_sched0;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower(better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)
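
/*
 * Rough illustration of the scale (see sched_interact_score() below for the
 * exact computation): when sleep time dominates, the score is approximately
 * SCHED_INTERACT_HALF * run / sleep, so a kse group that sleeps at least
 * twice as long as it runs scores around 25 and stays below
 * SCHED_INTERACT_THRESH, while one that runs more than it sleeps always
 * scores above SCHED_INTERACT_HALF and is never considered interactive.
 */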

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_INTERACTIVE		(slice_max)
#define	SCHED_SLICE_NTHRESH		(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))

/*
 * This macro determines whether or not the thread belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    ((ke->ke_thread->td_flags & TDF_BORROWING) || SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */
struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_load_timeshare;	/* Load for timeshare. */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int		ksq_transferable;
	LIST_ENTRY(kseq) ksq_siblings;	/* Next in kseq group. */
	struct kseq_group *ksq_group;	/* Our processor group. */
	volatile struct kse *ksq_assigned;	/* assigned by another CPU. */
#else
	int		ksq_sysload;	/* For loadavg, !ITHD load. */
#endif
};

#ifdef SMP
/*
 * kseq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not
 * NUMA.  In a NUMA environment we'd want an idle bitmap per group and a two
 * tiered load balancer.
 */
struct kseq_group {
	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
	int	ksg_load;		/* Total load of this group. */
	int	ksg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, kseq) ksg_members;	/* Linked list of all members. */
};
#endif
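
/*
 * Illustrative layout, assuming a two-way SMT pair sharing one kseq_group:
 * ksg_cpus is 2, ksg_cpumask covers both siblings and ksg_transferable is
 * shared, so an idle sibling can pull work from the other's queues in
 * kseq_idled() before it marks the whole group idle in kseq_idle.
 */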

/*
 * One kse queue per processor.
 */
#ifdef SMP
static cpumask_t kseq_idle;
static int ksg_maxid;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq_group kseq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;
static int balance_groups;

#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#define	KSEQ_ID(x)	((x) - kseq_cpu)
#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
#else	/* !SMP */
static struct kseq	kseq_cpu;

#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void slot_fill(struct ksegrp *);
static struct kse *sched_choose(void);	/* XXX Should be thread * */
static void sched_slice(struct kse *);
static void sched_priority(struct ksegrp *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct ksegrp *);
static void sched_interact_update(struct ksegrp *);
static void sched_interact_fork(struct ksegrp *);
static void sched_pctcpu_update(struct kse *);

/* Operations on per processor queues */
static struct kse *kseq_choose(struct kseq *);
static void kseq_setup(struct kseq *);
static void kseq_load_add(struct kseq *, struct kse *);
static void kseq_load_rem(struct kseq *, struct kse *);
static __inline void kseq_runq_add(struct kseq *, struct kse *, int);
static __inline void kseq_runq_rem(struct kseq *, struct kse *);
static void kseq_nice_add(struct kseq *, int);
static void kseq_nice_rem(struct kseq *, int);
void kseq_print(int cpu);
#ifdef SMP
static int kseq_transfer(struct kseq *, struct kse *, int);
static struct kse *runq_steal(struct runq *);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct kseq_group *);
static void sched_balance_pair(struct kseq *, struct kseq *);
static void kseq_move(struct kseq *, int);
static int kseq_idled(struct kseq *);
static void kseq_notify(struct kse *, int);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *, int);
#define	KSE_CAN_MIGRATE(ke)						\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
#ifdef SMP
	printf("\tload transferable: %d\n", kseq->ksq_transferable);
#endif
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static __inline void
kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke)) {
		kseq->ksq_transferable++;
		kseq->ksq_group->ksg_transferable++;
		ke->ke_flags |= KEF_XFERABLE;
	}
#endif
	runq_add(ke->ke_runq, ke, flags);
}

static __inline void
kseq_runq_rem(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (ke->ke_flags & KEF_XFERABLE) {
		kseq->ksq_transferable--;
		kseq->ksq_group->ksg_transferable--;
		ke->ke_flags &= ~KEF_XFERABLE;
	}
#endif
	runq_remove(ke->ke_runq, ke);
}

static void
kseq_load_add(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare++;
	kseq->ksq_load++;
	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load++;
#else
		kseq->ksq_sysload++;
#endif
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_proc->p_nice);
}

static void
kseq_load_rem(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare--;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load--;
#else
		kseq->ksq_sysload--;
#endif
	kseq->ksq_load--;
	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}
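
/*
 * The nice bins above are indexed by nice + SCHED_PRI_NHALF; with the stock
 * PRIO_MIN/PRIO_MAX of -20/20 this places nice -20 in bin 0, nice 0 in bin
 * 20 and nice +20 in bin 40, while ksq_nicemin tracks the most favorable
 * (lowest) nice value currently queued.
 */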

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
sched_balance(void)
{
	struct kseq_group *high;
	struct kseq_group *low;
	struct kseq_group *ksg;
	int cnt;
	int i;

	bal_tick = ticks + (random() % (hz * 2));
	if (smp_started == 0)
		return;
	low = high = NULL;
	i = random() % (ksg_maxid + 1);
	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
		ksg = KSEQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || ksg->ksg_load > high->ksg_load)
		    && ksg->ksg_transferable)
			high = ksg;
		if (low == NULL || ksg->ksg_load < low->ksg_load)
			low = ksg;
		if (++i > ksg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->ksg_members),
		    LIST_FIRST(&low->ksg_members));
}

static void
sched_balance_groups(void)
{
	int i;

	gbal_tick = ticks + (random() % (hz * 2));
	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= ksg_maxid; i++)
			sched_balance_group(KSEQ_GROUP(i));
}

static void
sched_balance_group(struct kseq_group *ksg)
{
	struct kseq *kseq;
	struct kseq *high;
	struct kseq *low;
	int load;

	if (ksg->ksg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
		load = kseq->ksq_load;
		if (high == NULL || load > high->ksq_load)
			high = kseq;
		if (low == NULL || load < low->ksq_load)
			low = kseq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct kseq *high, struct kseq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * kseq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->ksq_group == low->ksq_group) {
		transferable = high->ksq_transferable;
		high_load = high->ksq_load;
		low_load = low->ksq_load;
	} else {
		transferable = high->ksq_group->ksg_transferable;
		high_load = high->ksq_group->ksg_load;
		low_load = low->ksq_group->ksg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * kses we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		kseq_move(high, KSEQ_ID(low));
	return;
}
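
/*
 * Worked example of the move calculation above: with a high load of 7 and a
 * low load of 2 the difference is 5, which rounds up to 3 candidate moves;
 * the count is then clamped to the transferable load, so pinned and bound
 * threads are never counted as movable.
 */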

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kseq *kseq;
	struct kseq *to;
	struct kse *ke;

	kseq = from;
	to = KSEQ_CPU(cpu);
	ke = kseq_steal(kseq, 1);
	if (ke == NULL) {
		struct kseq_group *ksg;

		ksg = kseq->ksq_group;
		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
			if (kseq == from || kseq->ksq_transferable == 0)
				continue;
			ke = kseq_steal(kseq, 1);
			break;
		}
		if (ke == NULL)
			panic("kseq_move: No KSEs available with a "
			    "transferable count of %d\n",
			    ksg->ksg_transferable);
	}
	if (kseq == to)
		return;
	ke->ke_state = KES_THREAD;
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
	kseq_notify(ke, cpu);
}

static int
kseq_idled(struct kseq *kseq)
{
	struct kseq_group *ksg;
	struct kseq *steal;
	struct kse *ke;

	ksg = kseq->ksq_group;
	/*
	 * If we're in a cpu group, try to steal kses from another cpu in
	 * the group before idling.
	 */
	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
			if (steal == kseq || steal->ksq_transferable == 0)
				continue;
			ke = kseq_steal(steal, 0);
			if (ke == NULL)
				continue;
			ke->ke_state = KES_THREAD;
			kseq_runq_rem(steal, ke);
			kseq_load_rem(steal, ke);
			ke->ke_cpu = PCPU_GET(cpuid);
			ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
			sched_add(ke->ke_thread, SRQ_YIELDING);
			return (0);
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a KSE bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	ksg->ksg_idlemask |= PCPU_GET(cpumask);
	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
		return (1);
	atomic_set_int(&kseq_idle, ksg->ksg_mask);
	return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		*(volatile struct kse **)&ke = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		kseq->ksq_group->ksg_load--;
		kseq->ksq_load--;
		ke->ke_flags &= ~KEF_ASSIGNED;
		ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
		sched_add(ke->ke_thread, SRQ_YIELDING);
	}
}

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;
	int class;
	int prio;

	kseq = KSEQ_CPU(cpu);
	/* XXX */
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq_idle & kseq->ksq_group->ksg_mask))
		atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
	kseq->ksq_group->ksg_load++;
	kseq->ksq_load++;
	ke->ke_cpu = cpu;
	ke->ke_flags |= KEF_ASSIGNED;
	prio = ke->ke_thread->td_priority;

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	/*
	 * Without sched_lock we could lose a race where we set NEEDRESCHED
	 * on a thread that is switched out before the IPI is delivered.  This
	 * would lead us to miss the resched.  This will be a problem once
	 * sched_lock is pushed down.
	 */
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}
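
/*
 * ksq_assigned is used as a lock-free singly linked list: kseq_notify()
 * above pushes a kse onto the remote cpu's list with a cmpset loop, and the
 * owning cpu later swaps the whole list out atomically in kseq_assign() and
 * adds each entry to its own run queue.
 */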

static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (KSE_CAN_MIGRATE(ke))
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq, int stealidle)
{
	struct kse *ke;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 */
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if (stealidle)
		return (runq_steal(&kseq->ksq_idle));
	return (NULL);
}

int
kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
{
	struct kseq_group *nksg;
	struct kseq_group *ksg;
	struct kseq *old;
	int cpu;
	int idx;

	if (smp_started == 0)
		return (0);
	cpu = 0;
	/*
	 * If our load exceeds a certain threshold we should attempt to
	 * reassign this thread.  The first candidate is the cpu that
	 * originally ran the thread.  If it is idle, assign it there,
	 * otherwise, pick an idle cpu.
	 *
	 * The threshold at which we start to reassign kses has a large impact
	 * on the overall performance of the system.  Tuned too high and
	 * some CPUs may idle.  Too low and there will be excess migration
	 * and context switches.
	 */
	old = KSEQ_CPU(ke->ke_cpu);
	nksg = old->ksq_group;
	ksg = kseq->ksq_group;
	if (kseq_idle) {
		if (kseq_idle & nksg->ksg_mask) {
			cpu = ffs(nksg->ksg_idlemask);
			if (cpu) {
				CTR2(KTR_SCHED,
				    "kseq_transfer: %p found old cpu %X "
				    "in idlemask.", ke, cpu);
				goto migrate;
			}
		}
		/*
		 * Multiple cpus could find this bit simultaneously
		 * but the race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu) {
			CTR2(KTR_SCHED, "kseq_transfer: %p found %X "
			    "in idlemask.", ke, cpu);
			goto migrate;
		}
	}
	idx = 0;
#if 0
	if (old->ksq_load < kseq->ksq_load) {
		cpu = ke->ke_cpu + 1;
		CTR2(KTR_SCHED, "kseq_transfer: %p old cpu %X "
		    "load less than ours.", ke, cpu);
		goto migrate;
	}
	/*
	 * No new CPU was found, look for one with less load.
	 */
	for (idx = 0; idx <= ksg_maxid; idx++) {
		nksg = KSEQ_GROUP(idx);
		if (nksg->ksg_load /*+ (nksg->ksg_cpus * 2)*/ < ksg->ksg_load) {
			cpu = ffs(nksg->ksg_cpumask);
			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X load less "
			    "than ours.", ke, cpu);
			goto migrate;
		}
	}
#endif
	/*
	 * If another cpu in this group has idled, assign a thread over
	 * to them after checking to see if there are idled groups.
	 */
	if (ksg->ksg_idlemask) {
		cpu = ffs(ksg->ksg_idlemask);
		if (cpu) {
			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X idle in "
			    "group.", ke, cpu);
			goto migrate;
		}
	}
	return (0);
migrate:
	/*
	 * Now that we've found an idle CPU, migrate the thread.
	 */
	cpu--;
	ke->ke_runq = NULL;
	kseq_notify(ke, cpu);

	return (1);
}

#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */

static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct runq *swap;
	struct kse *ke;
	int nice;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
		    ke->ke_proc->p_nice != 0)) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke, 0);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}
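
/*
 * Note on the loop above: the timeshare queues are consumed in strict
 * priority order from ksq_curr, and only when the current queue is
 * completely empty are the current and next queues swapped, which is what
 * eventually gives expired and deeply nice'd threads their turn.
 */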

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);
	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];
	kseq->ksq_load = 0;
	kseq->ksq_load_timeshare = 0;
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the kseqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct kseq *ksq;

		ksq = &kseq_cpu[i];
		ksq->ksq_assigned = NULL;
		kseq_setup(&kseq_cpu[i]);
	}
	if (smp_topology == NULL) {
		struct kseq_group *ksg;
		struct kseq *ksq;
		int cpus;

		for (cpus = 0, i = 0; i < MAXCPU; i++) {
			if (CPU_ABSENT(i))
				continue;
			ksq = &kseq_cpu[cpus];
			ksg = &kseq_groups[cpus];
			/*
			 * Setup a kseq group with one member.
			 */
			ksq->ksq_transferable = 0;
			ksq->ksq_group = ksg;
			ksg->ksg_cpus = 1;
			ksg->ksg_idlemask = 0;
			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			LIST_INIT(&ksg->ksg_members);
			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
			cpus++;
		}
		ksg_maxid = cpus - 1;
	} else {
		struct kseq_group *ksg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			ksg = &kseq_groups[i];
			/*
			 * Initialize the group.
			 */
			ksg->ksg_idlemask = 0;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			ksg->ksg_cpus = cg->cg_count;
			ksg->ksg_cpumask = cg->cg_mask;
			LIST_INIT(&ksg->ksg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (ksg->ksg_mask == 0)
						ksg->ksg_mask = 1 << j;
					kseq_cpu[j].ksq_transferable = 0;
					kseq_cpu[j].ksq_group = ksg;
					LIST_INSERT_HEAD(&ksg->ksg_members,
					    &kseq_cpu[j], ksq_siblings);
				}
			}
			if (ksg->ksg_cpus > 1)
				balance_groups = 1;
		}
		ksg_maxid = smp_topology->ct_count - 1;
	}
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	bal_tick = ticks + hz;
	if (balance_groups)
		gbal_tick = ticks + (hz / 2);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_load_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_proc->p_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	if (ke->ke_thread->td_flags & TDF_BORROWING) {
		ke->ke_slice = SCHED_SLICE_MIN;
		return;
	}

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get a minimal slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the ksegs nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_load_timeshare == 0 ||
		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_proc->p_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_INTERACTIVE;

	return;
}
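
/*
 * Worked example of the slice assignment above, assuming the default
 * slice_min/slice_max of 10ms and ~140ms set in sched_setup(): the least
 * nice timesharing ksegrp on a queue receives SCHED_SLICE_MAX, one that is
 * SCHED_SLICE_NTHRESH nicer than ksq_nicemin receives roughly
 * SCHED_SLICE_MIN, and anything still nicer gets a zero slice and is pushed
 * to the next queue by kseq_choose() until the nice spread shrinks.
 */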

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}

static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}
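
/*
 * Example of the scoring above: with kg_slptime about three times
 * kg_runtime the score works out near SCHED_INTERACT_HALF / 3, well below
 * SCHED_INTERACT_THRESH, whereas a ksegrp whose run time exceeds its sleep
 * time always scores above SCHED_INTERACT_HALF.
 */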

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	ksegrp0.kg_sched = &kg_sched0;
	thread0.td_sched = &kse0;
	kse0.ke_thread = &thread0;
	kse0.ke_state = KES_THREAD;
	kg_sched0.skg_concurrency = 1;
	kg_sched0.skg_avail_opennings = 0; /* we are already running */
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

void
sched_thread_priority(struct thread *td, u_char prio)
{
	struct kse *ke;

	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);
	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority == prio)
		return;
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke->ke_runq != NULL &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke, 0);
		}
		/*
		 * Hold this kse on this cpu so that sched_prio() doesn't
		 * cause excessive migration.  We only want migration to
		 * happen as the result of a wakeup.
		 */
		ke->ke_flags |= KEF_HOLD;
		adjustrunqueue(td, prio);
		ke->ke_flags &= ~KEF_HOLD;
	} else
		td->td_priority = prio;
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_thread_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_ksegrp->kg_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_thread_priority(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't
	 * ever lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_thread_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct kseq *ksq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;
	ksq = KSEQ_SELF();

	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;

	/*
	 * If the KSE has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if (td == PCPU_GET(idlethread)) {
		TD_SET_CAN_RUN(td);
	} else if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
		/* We are ending our run so make our slot available again */
		SLOT_RELEASE(td->td_ksegrp);
		kseq_load_rem(ksq, ke);
		if (TD_IS_RUNNING(td)) {
			/*
			 * Don't allow the thread to migrate
			 * from a preemption.
			 */
			ke->ke_flags |= KEF_HOLD;
			setrunqueue(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
			ke->ke_flags &= ~KEF_HOLD;
		} else if ((td->td_proc->p_flag & P_HADTHREADS) &&
		    (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp))
			/*
			 * We will not be on the run queue.
			 * So we must be sleeping or similar.
			 * Don't use the slot if we will need it
			 * for newtd.
			 */
			slot_fill(td->td_ksegrp);
	}
	if (newtd != NULL) {
		/*
		 * If we bring in a thread, account for it as if it had been
		 * added to the run queue and then chosen.
		 */
		newtd->td_kse->ke_flags |= KEF_DIDRUN;
		newtd->td_kse->ke_runq = ksq->ksq_curr;
		TD_SET_RUNNING(newtd);
		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
		/*
		 * XXX When we preempt, we've already consumed a slot because
		 * we got here through sched_add().  However, newtd can come
		 * from thread_switchout() which can't SLOT_USE() because
		 * the SLOT code is scheduler dependent.  We must use the
		 * slot here otherwise.
		 */
		if ((flags & SW_PREEMPT) == 0)
			SLOT_USE(newtd->td_ksegrp);
	} else
		newtd = choosethread();
	if (td != newtd) {
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		cpu_switch(td, newtd);
#ifdef HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		if (kg->kg_pri_class == PRI_TIMESHARE) {
			FOREACH_THREAD_IN_GROUP(kg, td) {
				ke = td->td_kse;
				if (ke->ke_runq == NULL)
					continue;
				kseq = KSEQ_CPU(ke->ke_cpu);
				kseq_nice_rem(kseq, p->p_nice);
				kseq_nice_add(kseq, nice);
			}
		}
	}
	p->p_nice = nice;
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		sched_priority(kg);
		FOREACH_THREAD_IN_GROUP(kg, td)
			td->td_flags |= TDF_NEEDRESCHED;
	}
}

void
sched_sleep(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		sched_slice(td->td_kse);
		td->td_slptime = 0;
	}
	setrunqueue(td, SRQ_BORING);
}
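
/*
 * The interactivity history kept above is in ticks shifted left by 10:
 * sched_wakeup() adds (ticks - td_slptime) << 10 to kg_slptime and
 * sched_clock() adds tickincr << 10 to kg_runtime, which matches the << 10
 * scaling of SCHED_SLP_RUN_MAX and gives the sleep/run ratio some
 * fractional precision without floating point.
 */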

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct thread *td, struct thread *childtd)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(td, childtd->td_ksegrp);
	sched_fork_thread(td, childtd);
}

void
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
	struct ksegrp *kg = td->td_ksegrp;
	mtx_assert(&sched_lock, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
	struct kse *ke;
	struct kse *ke2;

	sched_newthread(child);
	ke = td->td_kse;
	ke2 = child->td_kse;
	ke2->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	ke2->ke_cpu = ke->ke_cpu;
	ke2->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	ke2->ke_ticks = ke->ke_ticks;
	ke2->ke_ltick = ke->ke_ltick;
	ke2->ke_ftick = ke->ke_ftick;
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;
	struct thread *td;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(kg->kg_pri_class);
	FOREACH_THREAD_IN_GROUP(kg, td) {
		ke = td->td_kse;
		if ((ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD) || ke->ke_runq == NULL)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
		/*
		 * On SMP if we're on the RUNQ we must adjust the transferable
		 * count because we could be changing to or from an interrupt
		 * class.
		 */
		if (ke->ke_state == KES_ONRUNQ) {
			if (KSE_CAN_MIGRATE(ke)) {
				kseq->ksq_transferable--;
				kseq->ksq_group->ksg_transferable--;
			}
			if (KSE_CAN_MIGRATE(ke)) {
				kseq->ksq_transferable++;
				kseq->ksq_group->ksg_transferable++;
			}
		}
#endif
		if (oclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare--;
			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
		}
		if (nclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare++;
			kseq_nice_add(kseq, kg->kg_proc->p_nice);
		}
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct thread *childtd)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
	sched_exit_thread(NULL, childtd);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
{
	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
	kg->kg_runtime += td->td_ksegrp->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *childtd)
{
	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    childtd, childtd->td_proc->p_comm, childtd->td_priority);
	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
}

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
	if (ticks >= bal_tick)
		sched_balance();
	if (ticks >= gbal_tick && balance_groups)
		sched_balance_groups();
	/*
	 * We could have been assigned a non real-time thread without an
	 * IPI.
	 */
	if (kseq->ksq_assigned)
		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
#endif
	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	if (--ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_load_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_load_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}
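
/*
 * Returns non-zero if something is runnable on this cpu's queues.  When
 * called from the idle thread any load counts; otherwise the current thread
 * is excluded, which is why the comparison below is against ksq_load - 1.
 */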
int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned) {
		mtx_lock_spin(&sched_lock);
		kseq_assign(kseq);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		td->td_base_pri = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
restart:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_idled(kseq) == 0)
				goto restart;
#endif
		kseq_runq_rem(kseq, ke);
		ke->ke_state = KES_THREAD;
		return (ke);
	}
#ifdef SMP
	if (kseq_idled(kseq) == 0)
		goto restart;
#endif
	return (NULL);
}

void
sched_add(struct thread *td, int flags)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
	int preemptive;
	int canmigrate;
	int class;

	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	canmigrate = 1;
	preemptive = !(flags & SRQ_YIELDING);
	class = PRI_BASE(kg->kg_pri_class);
	kseq = KSEQ_SELF();
	if ((ke->ke_flags & KEF_INTERNAL) == 0)
		SLOT_USE(td->td_ksegrp);
	ke->ke_flags &= ~KEF_INTERNAL;
#ifdef SMP
	if (ke->ke_flags & KEF_ASSIGNED) {
		if (ke->ke_flags & KEF_REMOVED)
			ke->ke_flags &= ~KEF_REMOVED;
		return;
	}
	canmigrate = KSE_CAN_MIGRATE(ke);
#endif
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		if (canmigrate)
			ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * Don't migrate running threads here.  Force the long term balancer
	 * to do it.
	 */
	if (ke->ke_flags & KEF_HOLD) {
		ke->ke_flags &= ~KEF_HOLD;
		canmigrate = 0;
	}
	/*
	 * If this thread is pinned or bound, notify the target cpu.
	 */
	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
		ke->ke_runq = NULL;
		kseq_notify(ke, ke->ke_cpu);
		return;
	}
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.  If not, see if we should transfer this thread.
	 */
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (kseq->ksq_group->ksg_idlemask ==
		    kseq->ksq_group->ksg_cpumask)
			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
	} else if (canmigrate && kseq->ksq_load > 1 && class != PRI_ITHD)
		if (kseq_transfer(kseq, ke, class))
			return;
	ke->ke_cpu = PCPU_GET(cpuid);
#endif
	if (td->td_priority < curthread->td_priority &&
	    ke->ke_runq == kseq->ksq_curr)
		curthread->td_flags |= TDF_NEEDRESCHED;
	if (preemptive && maybe_preempt(td))
		return;
	ke->ke_state = KES_ONRUNQ;

	kseq_runq_add(kseq, ke, flags);
	kseq_load_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	SLOT_RELEASE(td->td_ksegrp);
	if (ke->ke_flags & KEF_ASSIGNED) {
		ke->ke_flags |= KEF_REMOVED;
		return;
	}
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	kseq = KSEQ_CPU(ke->ke_cpu);
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
		    ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rticks per second? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}
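
/*
 * The pctcpu computed above is a stathz based estimate: ke_ticks is scaled
 * to ticks per second over the SCHED_CPU_TIME window and then converted to
 * a fixed point fraction of FSCALE, so a thread that ran for every stathz
 * tick of the window reports close to 100%.
 */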

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	if (PCPU_GET(cpuid) == cpu)
		return;
	/* sched_rem without the runq_remove */
	ke->ke_state = KES_THREAD;
	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
	kseq_notify(ke, cpu);
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	return (td->td_kse->ke_flags & KEF_BOUND);
}

int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= ksg_maxid; i++)
		total += KSEQ_GROUP(i)->ksg_load;
	return (total);
#else
	return (KSEQ_SELF()->ksq_sysload);
#endif
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"