/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
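 * Its value simply reflects whether the kernel was built with the
 * PREEMPTION option.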
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");

#ifdef SCHED_STATS
long switch_preempt;
long switch_owepreempt;
long switch_turnstile;
long switch_sleepq;
long switch_sleepqtimo;
long switch_relinquish;
long switch_needresched;
static SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
SYSCTL_INT(_kern_sched_stats, OID_AUTO, preempt, CTLFLAG_RD, &switch_preempt, 0, "");
SYSCTL_INT(_kern_sched_stats, OID_AUTO, owepreempt, CTLFLAG_RD, &switch_owepreempt, 0, "");
SYSCTL_INT(_kern_sched_stats, OID_AUTO, turnstile, CTLFLAG_RD, &switch_turnstile, 0, "");
SYSCTL_INT(_kern_sched_stats, OID_AUTO, sleepq, CTLFLAG_RD, &switch_sleepq, 0, "");
SYSCTL_INT(_kern_sched_stats, OID_AUTO, sleepqtimo, CTLFLAG_RD, &switch_sleepqtimo, 0, "");
SYSCTL_INT(_kern_sched_stats, OID_AUTO, relinquish, CTLFLAG_RD, &switch_relinquish, 0, "");
SYSCTL_INT(_kern_sched_stats, OID_AUTO, needresched, CTLFLAG_RD, &switch_needresched, 0, "");
static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	switch_preempt = 0;
	switch_owepreempt = 0;
	switch_turnstile = 0;
	switch_sleepq = 0;
	switch_sleepqtimo = 0;
	switch_relinquish = 0;
	switch_needresched = 0;

	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct thread *td;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run the idle thread on the APs. */
		td = PCPU_GET(idlethread);
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	td = sched_choose();

	/*
	 * If we are in a panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* Note that it is no longer on the run queue. */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
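 * critical_enter() increments td_critnest; critical_exit() decrements it
 * and, when PREEMPTION is configured, performs any preemption that was
 * deferred (recorded in td_owepreempt) while the nesting count was non-zero.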
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			thread_lock(td);
			td->td_critnest--;
			SCHED_STAT_INC(switch_owepreempt);
			mi_switch(SW_INVOL, NULL);
			thread_unlock(td);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the current thread should immediately be preempted in
 * favor of the new thread.  If so, it switches to the new thread and
 * eventually returns true.  If not, it returns false so that the caller may
 * place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed either, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a real-time priority, the
	 *    current thread's priority is not an idle priority, and
	 *    FULL_PREEMPTION is not enabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on the system run queue.
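	 * Switch to it directly instead of enqueueing it; returning 1 below
	 * tells the caller not to place it on a run queue.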
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	SCHED_STAT_INC(switch_preempt);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

static __inline int
runq_findbit_from(struct runq *rq, u_char start)
{
	struct rqbits *rqb;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW - 1);
	pri = 0;
	CTR1(KTR_RUNQ, "runq_findbit_from: start %d", start);
again:
	for (i = RQB_WORD(start); i < RQB_LEN; i++) {
		CTR3(KTR_RUNQ, "runq_findbit_from: bits %d = %#x bit = %d",
		    i, rqb->rqb_bits[i], bit);
		if (rqb->rqb_bits[i]) {
			if (bit != 0) {
				for (pri = bit; pri < RQB_BPW; pri++)
					if (rqb->rqb_bits[i] & (1ul << pri))
						break;
				bit = 0;
				if (pri >= RQB_BPW)
					continue;
			} else
				pri = RQB_FFS(rqb->rqb_bits[i]);
			pri += (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}
		bit = 0;
	}
	if (start != 0) {
		CTR0(KTR_RUNQ, "runq_findbit_from: restarting");
		start = 0;
		goto again;
	}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
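 * The status bits are kept in an array of machine words (rqb_bits[]);
 * RQB_WORD(pri) selects the word and RQB_BIT(pri) the bit within it.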
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct td_sched *ts, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ts->ts_thread->td_priority / RQ_PPQ;
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

void
runq_add_pri(struct runq *rq, struct td_sched *ts, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add_pri: td=%p ts=%p pri=%d idx=%d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct td_sched *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; values of 0 or less are ignored. */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
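			 * runq_fuzz bounds how many queue entries we
			 * are willing to scan for a thread that last
			 * ran on this CPU.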
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct td_sched *ts2;
			ts2 = ts = TAILQ_FIRST(rqh);

			while (count-- && ts2) {
				if (ts2->ts_thread->td_lastcpu == cpu) {
					ts = ts2;
					break;
				}
				ts2 = TAILQ_NEXT(ts2, ts_procq);
			}
		} else
#endif
			ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

struct td_sched *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose_from: no proc on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d td_sched=%p idx=%d rqh=%p",
		    pri, ts, ts->ts_rqindex, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * The caller must set the thread's state afterwards.
 */
void
runq_remove(struct runq *rq, struct td_sched *ts)
{

	runq_remove_idx(rq, ts, NULL);
}

void
runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
	    ("runq_remove_idx: process swapped out"));
	pri = ts->ts_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	{
		struct td_sched *nts;

		TAILQ_FOREACH(nts, rqh, ts_procq)
			if (nts == ts)
				break;
		if (ts != nts)
			panic("runq_remove_idx: ts %p not on rqindex %d",
			    ts, pri);
	}
	TAILQ_REMOVE(rqh, ts, ts_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and proc have already been linked in.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct thread *td)
{
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ts;

	ts = (struct td_sched *)(td + 1);
	bzero(ts, sizeof(*ts));
	td->td_sched = ts;
	ts->ts_thread = td;
}

#endif /* KERN_SWITCH_INCLUDE */