/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <opt_sched.h>

#define kse td_sched

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
    "Scheduler name");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef PREEMPTION
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel PREEMPTION is unstable under SCHED_ULE.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif

/*
 * The schedulable entity that can be given a context to run.
 * A process may have several of these.  Probably one per processor
 * but possibly a few more.  In this universe they are grouped
 * with a KSEG that contains the priority and niceness
 * for the group.
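 * (In this file the kse is the thread's scheduler-private data, overlaid
 * on td_sched via the "#define kse td_sched" above.)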
 */
struct kse {
	TAILQ_ENTRY(kse) ke_kglist;	/* (*) Queue of threads in ke_ksegrp. */
	TAILQ_ENTRY(kse) ke_kgrlist;	/* (*) Queue of threads in this state.*/
	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
	int		ke_flags;	/* (j) KEF_* flags. */
	struct thread	*ke_thread;	/* (*) Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ke_oncpu;	/* (j) Which cpu we are on. */
	char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_THREAD = 0x0,	/* slaved to thread state */
		KES_ONRUNQ
	} ke_state;			/* (j) thread sched specific status. */
	int		ke_slptime;
	int		ke_slice;
	struct runq	*ke_runq;
	u_char		ke_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ke_ltick;	/* Last tick that we were running on */
	int		ke_ftick;	/* First tick that we were running on */
	int		ke_ticks;	/* Tick count */

};

#define td_kse			td_sched
#define td_slptime		td_kse->ke_slptime
#define ke_proc			ke_thread->td_proc
#define ke_ksegrp		ke_thread->td_ksegrp

/* flags kept in ke_flags */
#define	KEF_SCHED0	0x00001	/* For scheduler-specific use. */
#define	KEF_SCHED1	0x00002	/* For scheduler-specific use. */
#define	KEF_SCHED2	0x00004	/* For scheduler-specific use. */
#define	KEF_SCHED3	0x00008	/* For scheduler-specific use. */
#define	KEF_DIDRUN	0x02000	/* Thread actually ran. */
#define	KEF_EXIT	0x04000	/* Thread is being killed. */

/*
 * These data structures are allocated within their parent data structure but
 * are scheduler specific.
 */

#define	ke_assign	ke_procq.tqe_next

#define	KEF_ASSIGNED	KEF_SCHED0	/* Thread is being migrated. */
#define	KEF_BOUND	KEF_SCHED1	/* Thread can not migrate. */
#define	KEF_XFERABLE	KEF_SCHED2	/* Thread was added as transferable. */
#define	KEF_HOLD	KEF_SCHED3	/* Thread is temporarily bound. */

struct kg_sched {
	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
					    /* the system scheduler */
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
	int	skg_avail_opennings;	/* (j) Num unfilled slots in group.*/
	int	skg_concurrency;	/* (j) Num threads requested in group.*/
	int	skg_runq_threads;	/* (j) Num KSEs on runq. */
};
#define	kg_last_assigned	kg_sched->skg_last_assigned
#define	kg_avail_opennings	kg_sched->skg_avail_opennings
#define	kg_concurrency		kg_sched->skg_concurrency
#define	kg_runq_threads		kg_sched->skg_runq_threads
#define	kg_runtime		kg_sched->skg_runtime
#define	kg_slptime		kg_sched->skg_slptime

#define SLOT_RELEASE(kg)					\
do {								\
	kg->kg_avail_opennings++;				\
	CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)",	\
	    kg,							\
	    kg->kg_concurrency,					\
	    kg->kg_avail_opennings);				\
	/*KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency), \
	    ("slots out of whack")); */				\
} while (0)

#define SLOT_USE(kg)						\
do {								\
	kg->kg_avail_opennings--;				\
	CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)",		\
	    kg,							\
	    kg->kg_concurrency,					\
	    kg->kg_avail_opennings);				\
	/*KASSERT((kg->kg_avail_opennings >= 0),		\
	    ("slots out of whack"));*/				\
} while (0)

static struct kse kse0;
static struct kg_sched kg_sched0;

/*
 * The priority is primarily determined by the interactivity score.
 * Thus, we give lower (better) priorities to kse groups that use less CPU.
 * The nice value is then directly added to this to allow nice to have some
 * effect on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)				\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_INTERACTIVE		(slice_max)
#define	SCHED_SLICE_NTHRESH		(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)					\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))

/*
 * This macro determines whether or not the thread belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg)					\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)					\
    (ke->ke_thread->td_priority < kg->kg_user_pri ||		\
    SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */
struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_load_timeshare;	/* Load for timeshare. */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int		ksq_transferable;
	LIST_ENTRY(kseq) ksq_siblings;	/* Next in kseq group. */
	struct kseq_group *ksq_group;	/* Our processor group. */
	volatile struct kse *ksq_assigned;	/* assigned by another CPU. */
#else
	int		ksq_sysload;		/* For loadavg, !ITHD load. */
#endif
};

#ifdef SMP
/*
 * kseq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
 * load balancer.
 */
struct kseq_group {
	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
	int	ksg_load;		/* Total load of this group. */
	int	ksg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, kseq) ksg_members;	/* Linked list of all members. */
};
#endif

/*
 * One kse queue per processor.
 */
#ifdef SMP
static cpumask_t kseq_idle;
static int ksg_maxid;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq_group kseq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;

#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#define	KSEQ_ID(x)	((x) - kseq_cpu)
#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
#else	/* !SMP */
static struct kseq	kseq_cpu;

#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void slot_fill(struct ksegrp *kg);
static struct kse *sched_choose(void);		/* XXX Should be thread * */
static void sched_add_internal(struct thread *td, int preemptive);
static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
static void sched_interact_fork(struct ksegrp *kg);
static void sched_pctcpu_update(struct kse *ke);

/* Operations on per processor queues */
static struct kse *kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_load_add(struct kseq *kseq, struct kse *ke);
static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
static struct kse *runq_steal(struct runq *rq);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct kseq_group *ksg);
static void sched_balance_pair(struct kseq *high, struct kseq *low);
static void kseq_move(struct kseq *from, int cpu);
static int kseq_idled(struct kseq *kseq);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
/*
 * On P4 Xeons the round-robin interrupt delivery is broken.
 * As a result of this, we can't pin interrupts to the cpu that they were
 * delivered to, otherwise all ithreads only run on CPU 0.
 */
#ifdef __i386__
#define	KSE_CAN_MIGRATE(ke, class)				\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#else /* !__i386__ */
#define	KSE_CAN_MIGRATE(ke, class)				\
    ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&	\
    ((ke)->ke_flags & KEF_BOUND) == 0)
#endif /* !__i386__ */
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
#ifdef SMP
	printf("\tload transferable: %d\n", kseq->ksq_transferable);
#endif
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static __inline void
kseq_runq_add(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
		kseq->ksq_transferable++;
		kseq->ksq_group->ksg_transferable++;
		ke->ke_flags |= KEF_XFERABLE;
	}
#endif
	runq_add(ke->ke_runq, ke, 0);
}

static __inline void
kseq_runq_rem(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (ke->ke_flags & KEF_XFERABLE) {
		kseq->ksq_transferable--;
		kseq->ksq_group->ksg_transferable--;
		ke->ke_flags &= ~KEF_XFERABLE;
	}
#endif
	runq_remove(ke->ke_runq, ke);
}

static void
kseq_load_add(struct kseq *kseq, struct kse *ke)
{
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare++;
	kseq->ksq_load++;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load++;
#else
		kseq->ksq_sysload++;
#endif
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE,
		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_proc->p_nice);
}

static void
kseq_load_rem(struct kseq *kseq, struct kse *ke)
{
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare--;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load--;
#else
		kseq->ksq_sysload--;
#endif
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * Normalize to zero.
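	 * Adding SCHED_PRI_NHALF maps the nice range [PRIO_MIN, PRIO_MAX]
	 * onto non-negative ksq_nice[] indices.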
	 */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
sched_balance(void)
{
	struct kseq_group *high;
	struct kseq_group *low;
	struct kseq_group *ksg;
	int cnt;
	int i;

	if (smp_started == 0)
		goto out;
	low = high = NULL;
	i = random() % (ksg_maxid + 1);
	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
		ksg = KSEQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || ksg->ksg_load > high->ksg_load)
		    && ksg->ksg_transferable)
			high = ksg;
		if (low == NULL || ksg->ksg_load < low->ksg_load)
			low = ksg;
		if (++i > ksg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->ksg_members),
		    LIST_FIRST(&low->ksg_members));
out:
	bal_tick = ticks + (random() % (hz * 2));
}

static void
sched_balance_groups(void)
{
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= ksg_maxid; i++)
			sched_balance_group(KSEQ_GROUP(i));
	gbal_tick = ticks + (random() % (hz * 2));
}

static void
sched_balance_group(struct kseq_group *ksg)
{
	struct kseq *kseq;
	struct kseq *high;
	struct kseq *low;
	int load;

	if (ksg->ksg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
		load = kseq->ksq_load;
		if (high == NULL || load > high->ksq_load)
			high = kseq;
		if (low == NULL || load < low->ksq_load)
			low = kseq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct kseq *high, struct kseq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * kseq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->ksq_group == low->ksq_group) {
		transferable = high->ksq_transferable;
		high_load = high->ksq_load;
		low_load = low->ksq_load;
	} else {
		transferable = high->ksq_group->ksg_transferable;
		high_load = high->ksq_group->ksg_load;
		low_load = low->ksq_group->ksg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * kses we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		kseq_move(high, KSEQ_ID(low));
	return;
}

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kseq *kseq;
	struct kseq *to;
	struct kse *ke;

	kseq = from;
	to = KSEQ_CPU(cpu);
	ke = kseq_steal(kseq, 1);
	if (ke == NULL) {
		struct kseq_group *ksg;

		ksg = kseq->ksq_group;
		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
			if (kseq == from || kseq->ksq_transferable == 0)
				continue;
			ke = kseq_steal(kseq, 1);
			break;
		}
		if (ke == NULL)
			panic("kseq_move: No KSEs available with a "
			    "transferable count of %d\n",
			    ksg->ksg_transferable);
	}
	if (kseq == to)
		return;
	ke->ke_state = KES_THREAD;
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
	kseq_notify(ke, cpu);
}

static int
kseq_idled(struct kseq *kseq)
{
	struct kseq_group *ksg;
	struct kseq *steal;
	struct kse *ke;

	ksg = kseq->ksq_group;
	/*
	 * If we're in a cpu group, try and steal kses from another cpu in
	 * the group before idling.
	 */
	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
			if (steal == kseq || steal->ksq_transferable == 0)
				continue;
			ke = kseq_steal(steal, 0);
			if (ke == NULL)
				continue;
			ke->ke_state = KES_THREAD;
			kseq_runq_rem(steal, ke);
			kseq_load_rem(steal, ke);
			ke->ke_cpu = PCPU_GET(cpuid);
			sched_add_internal(ke->ke_thread, 0);
			return (0);
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a KSE bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	ksg->ksg_idlemask |= PCPU_GET(cpumask);
	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
		return (1);
	atomic_set_int(&kseq_idle, ksg->ksg_mask);
	return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		*(volatile struct kse **)&ke = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		ke->ke_flags &= ~KEF_ASSIGNED;
		sched_add_internal(ke->ke_thread, 0);
	}
}

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;
	int prio;

	ke->ke_cpu = cpu;
	ke->ke_flags |= KEF_ASSIGNED;
	prio = ke->ke_thread->td_priority;

	kseq = KSEQ_CPU(cpu);

	/*
	 * Place a KSE on another cpu's queue and force a resched.
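	 * The kse is pushed onto the remote kseq's ksq_assigned list with an
	 * atomic cmpset loop; the remote cpu later drains the whole list in
	 * kseq_assign().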
	 */
	do {
		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	/*
	 * Without sched_lock we could lose a race where we set NEEDRESCHED
	 * on a thread that is switched out before the IPI is delivered.  This
	 * would lead us to miss the resched.  This will be a problem once
	 * sched_lock is pushed down.
	 */
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (KSE_CAN_MIGRATE(ke,
				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq, int stealidle)
{
	struct kse *ke;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 */
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if (stealidle)
		return (runq_steal(&kseq->ksq_idle));
	return (NULL);
}

int
kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
{
	struct kseq_group *ksg;
	int cpu;

	if (smp_started == 0)
		return (0);
	cpu = 0;
	/*
	 * If our load exceeds a certain threshold we should attempt to
	 * reassign this thread.  The first candidate is the cpu that
	 * originally ran the thread.  If it is idle, assign it there,
	 * otherwise, pick an idle cpu.
	 *
	 * The threshold at which we start to reassign kses has a large impact
	 * on the overall performance of the system.  Tuned too high and
	 * some CPUs may idle.  Too low and there will be excess migration
	 * and context switches.
	 */
	ksg = kseq->ksq_group;
	if (ksg->ksg_load > ksg->ksg_cpus && kseq_idle) {
		ksg = KSEQ_CPU(ke->ke_cpu)->ksq_group;
		if (kseq_idle & ksg->ksg_mask) {
			cpu = ffs(ksg->ksg_idlemask);
			if (cpu)
				goto migrate;
		}
		/*
		 * Multiple cpus could find this bit simultaneously
		 * but the race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu)
			goto migrate;
	}
	/*
	 * If another cpu in this group has idled, assign a thread over
	 * to them after checking to see if there are idled groups.
	 */
	ksg = kseq->ksq_group;
	if (ksg->ksg_idlemask) {
		cpu = ffs(ksg->ksg_idlemask);
		if (cpu)
			goto migrate;
	}
	/*
	 * No new CPU was found.
	 */
	return (0);
migrate:
	/*
	 * Now that we've found an idle CPU, migrate the thread.
	 */
	cpu--;
	ke->ke_runq = NULL;
	kseq_notify(ke, cpu);

	return (1);
}

#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
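 * Kses are taken from the current timeshare queue first; when it is empty
 * the current and next queues are swapped, a kse whose slice has reached
 * zero is resliced and pushed to the next queue, and the idle queue is
 * only consulted when both timeshare queues are empty.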
 */

static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct runq *swap;
	struct kse *ke;
	int nice;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin);
#ifdef notyet
		if (ke->ke_slice == 0 || nice > SCHED_SLICE_NTHRESH) {
#else
		if (ke->ke_slice == 0) {
#endif
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke, 0);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);
	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];
	kseq->ksq_load = 0;
	kseq->ksq_load_timeshare = 0;
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int balance_groups;
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the kseqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct kseq *ksq;

		ksq = &kseq_cpu[i];
		ksq->ksq_assigned = NULL;
		kseq_setup(&kseq_cpu[i]);
	}
	if (smp_topology == NULL) {
		struct kseq_group *ksg;
		struct kseq *ksq;

		for (i = 0; i < MAXCPU; i++) {
			ksq = &kseq_cpu[i];
			ksg = &kseq_groups[i];
			/*
			 * Setup a kseq group with one member.
			 */
			ksq->ksq_transferable = 0;
			ksq->ksq_group = ksg;
			ksg->ksg_cpus = 1;
			ksg->ksg_idlemask = 0;
			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			LIST_INIT(&ksg->ksg_members);
			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
		}
	} else {
		struct kseq_group *ksg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			ksg = &kseq_groups[i];
			/*
			 * Initialize the group.
			 */
			ksg->ksg_idlemask = 0;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			ksg->ksg_cpus = cg->cg_count;
			ksg->ksg_cpumask = cg->cg_mask;
			LIST_INIT(&ksg->ksg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (ksg->ksg_mask == 0)
						ksg->ksg_mask = 1 << j;
					kseq_cpu[j].ksq_transferable = 0;
					kseq_cpu[j].ksq_group = ksg;
					LIST_INSERT_HEAD(&ksg->ksg_members,
					    &kseq_cpu[j], ksq_siblings);
				}
			}
			if (ksg->ksg_cpus > 1)
				balance_groups = 1;
		}
		ksg_maxid = smp_topology->ct_count - 1;
	}
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
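	 * After its first run each balancer reschedules itself at a
	 * pseudo-random offset of up to hz * 2 ticks in the future.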
	 */
	bal_tick = ticks + hz;
	if (balance_groups)
		gbal_tick = ticks + (hz / 2);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_load_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_proc->p_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get a minimal slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_load_timeshare == 0 ||
		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_proc->p_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_INTERACTIVE;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));

	return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.
	 * Dividing by two here forces us into the range of
	 * [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}

static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	ksegrp0.kg_sched = &kg_sched0;
	thread0.td_sched = &kse0;
	kse0.ke_thread = &thread0;
	kse0.ke_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
	kse0.ke_state = KES_THREAD;
	kg_sched0.skg_concurrency = 1;
	kg_sched0.skg_avail_opennings = 0; /* we are already running */
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke && ke->ke_runq != NULL &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke, 0);
		}
		/*
		 * Hold this kse on this cpu so that sched_prio() doesn't
		 * cause excessive migration.  We only want migration to
		 * happen as the result of a wakeup.
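		 * KEF_HOLD is cleared again when the thread is requeued by
		 * sched_add_internal().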
		 */
		ke->ke_flags |= KEF_HOLD;
		adjustrunqueue(td, prio);
	} else
		td->td_priority = prio;
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_pflags &= ~TDP_OWEPREEMPT;

	/*
	 * If the KSE has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
		if (td == PCPU_GET(idlethread)) {
			TD_SET_CAN_RUN(td);
		} else {
			/* We are ending our run so make our slot available again */
			SLOT_RELEASE(td->td_ksegrp);
			if (TD_IS_RUNNING(td)) {
				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
				/*
				 * Don't allow the thread to migrate
				 * from a preemption.
				 */
				ke->ke_flags |= KEF_HOLD;
				setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
			} else {
				if (ke->ke_runq) {
					kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
				} else if ((td->td_flags & TDF_IDLETD) == 0)
					kdb_backtrace();
				/*
				 * We will not be on the run queue.
				 * So we must be sleeping or similar.
				 * Don't use the slot if we will need it
				 * for newtd.
				 */
				if ((td->td_proc->p_flag & P_HADTHREADS) &&
				    (newtd == NULL ||
				    newtd->td_ksegrp != td->td_ksegrp))
					slot_fill(td->td_ksegrp);
			}
		}
	}
	if (newtd != NULL) {
		/*
		 * If we bring in a thread,
		 * then account for it as if it had been added to the
		 * run queue and then chosen.
		 */
		newtd->td_kse->ke_flags |= KEF_DIDRUN;
		SLOT_USE(newtd->td_ksegrp);
		TD_SET_RUNNING(newtd);
		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
	} else
		newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		if (kg->kg_pri_class == PRI_TIMESHARE) {
			FOREACH_THREAD_IN_GROUP(kg, td) {
				ke = td->td_kse;
				if (ke->ke_runq == NULL)
					continue;
				kseq = KSEQ_CPU(ke->ke_cpu);
				kseq_nice_rem(kseq, p->p_nice);
				kseq_nice_add(kseq, nice);
			}
		}
	}
	p->p_nice = nice;
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		sched_priority(kg);
		FOREACH_THREAD_IN_GROUP(kg, td)
			td->td_flags |= TDF_NEEDRESCHED;
	}
}

void
sched_sleep(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_base_pri = td->td_priority;

	CTR2(KTR_ULE, "sleep thread %p (tick: %d)",
	    td, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
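	 * Sleep time is accumulated in the same 10-bit fixed point units
	 * as kg_runtime, hence the << 10 below.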
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup thread %p (%d ticks)", td, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td, SRQ_BORING);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct thread *td, struct thread *childtd)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(td, childtd->td_ksegrp);
	sched_fork_thread(td, childtd);
}

void
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
	struct ksegrp *kg = td->td_ksegrp;
	mtx_assert(&sched_lock, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
	struct kse *ke;
	struct kse *ke2;

	sched_newthread(child);
	ke = td->td_kse;
	ke2 = child->td_kse;
	ke2->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	ke2->ke_cpu = ke->ke_cpu;
	ke2->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	ke2->ke_ticks = ke->ke_ticks;
	ke2->ke_ltick = ke->ke_ltick;
	ke2->ke_ftick = ke->ke_ftick;
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;
	struct thread *td;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(kg->kg_pri_class);
	FOREACH_THREAD_IN_GROUP(kg, td) {
		ke = td->td_kse;
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
		/*
		 * On SMP if we're on the RUNQ we must adjust the transferable
		 * count because it could be changing to or from an interrupt
		 * class.
		 */
		if (ke->ke_state == KES_ONRUNQ) {
			if (KSE_CAN_MIGRATE(ke, oclass)) {
				kseq->ksq_transferable--;
				kseq->ksq_group->ksg_transferable--;
			}
			if (KSE_CAN_MIGRATE(ke, nclass)) {
				kseq->ksq_transferable++;
				kseq->ksq_group->ksg_transferable++;
			}
		}
#endif
		if (oclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare--;
			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
		}
		if (nclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare++;
			kseq_nice_add(kseq, kg->kg_proc->p_nice);
		}
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 * Avoid using sched_exit_thread to avoid having to decide which
 * thread in the parent gets the honour since it isn't used.
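 * Only the child's run time is given back to the parent's ksegrp; see
 * sched_exit_ksegrp() below.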
 */
void
sched_exit(struct proc *p, struct thread *childtd)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
{
	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
	kg->kg_runtime += td->td_ksegrp->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *childtd)
{
	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
}

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
	if (ticks == bal_tick)
		sched_balance();
	if (ticks == gbal_tick)
		sched_balance_groups();
	/*
	 * We could have been assigned a non real-time thread without an
	 * IPI.
	 */
	if (kseq->ksq_assigned)
		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
#endif
	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick thread %p (slice: %d, slptime: %d, runtime: %d)",
	    td, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	if (--ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_load_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_load_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned) {
		mtx_lock_spin(&sched_lock);
		kseq_assign(kseq);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
restart:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_idled(kseq) == 0)
				goto restart;
#endif
		kseq_runq_rem(kseq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run thread %p from %p (slice: %d, pri: %d)",
			    ke->ke_thread, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}
#ifdef SMP
	if (kseq_idled(kseq) == 0)
		goto restart;
#endif
	return (NULL);
}

void
sched_add(struct thread *td, int flags)
{

	/* let jeff work out how to map the flags better */
	/* I'm open to suggestions */
	if (flags & SRQ_YIELDING)
		/*
		 * Preempting during switching can be bad JUJU
		 * especially for KSE processes
		 */
		sched_add_internal(td, 0);
	else
		sched_add_internal(td, 1);
}

static void
sched_add_internal(struct thread *td, int preemptive)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
#ifdef SMP
	int canmigrate;
#endif
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	kseq = KSEQ_SELF();
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	class = PRI_BASE(kg->kg_pri_class);
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		/*
		 * This is for priority prop.
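		 * An idle class thread whose priority has been propagated
		 * above the idle range goes on the current queue; otherwise
		 * it stays on the idle queue.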
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * Don't migrate running threads here.  Force the long term balancer
	 * to do it.
	 */
	canmigrate = KSE_CAN_MIGRATE(ke, class);
	if (ke->ke_flags & KEF_HOLD) {
		ke->ke_flags &= ~KEF_HOLD;
		canmigrate = 0;
	}
	/*
	 * If this thread is pinned or bound, notify the target cpu.
	 */
	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
		ke->ke_runq = NULL;
		kseq_notify(ke, ke->ke_cpu);
		return;
	}
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.  If not, see if we should transfer this thread.
	 */
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (kseq->ksq_group->ksg_idlemask ==
		    kseq->ksq_group->ksg_cpumask)
			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
	} else if (kseq->ksq_load > 1 && canmigrate)
		if (kseq_transfer(kseq, ke, class))
			return;
	ke->ke_cpu = PCPU_GET(cpuid);
#endif
	/*
	 * XXX With preemption this is not necessary.
	 */
	if (td->td_priority < curthread->td_priority &&
	    ke->ke_runq == kseq->ksq_curr)
		curthread->td_flags |= TDF_NEEDRESCHED;
	if (preemptive && maybe_preempt(td))
		return;
	SLOT_USE(td->td_ksegrp);
	ke->ke_ksegrp->kg_runq_threads++;
	ke->ke_state = KES_ONRUNQ;

	kseq_runq_add(kseq, ke);
	kseq_load_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	ke = td->td_kse;
	/*
	 * It is safe to just return here because sched_rem() is only ever
	 * used in places where we're immediately going to add the
	 * kse back on again.  In that case it'll be added with the correct
	 * thread and priority when the caller drops the sched_lock.
	 */
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	SLOT_RELEASE(td->td_ksegrp);
	ke->ke_ksegrp->kg_runq_threads--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
		    ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/*
		 * How many rtick per second ?
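		 * rtick approximates the number of clock ticks this kse used
		 * per second over the averaging window; dividing by realstathz
		 * turns that into a fixed-point fraction of one cpu.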
		 */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	if (PCPU_GET(cpuid) == cpu)
		return;
	/* sched_rem without the runq_remove */
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_threads--;
	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
	kseq_notify(ke, cpu);
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= ksg_maxid; i++)
		total += KSEQ_GROUP(i)->ksg_load;
	return (total);
#else
	return (KSEQ_SELF()->ksq_sysload);
#endif
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"