1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_sched.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kdb.h> 38 #include <sys/kernel.h> 39 #include <sys/ktr.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/proc.h> 43 #include <sys/queue.h> 44 #include <sys/sched.h> 45 #include <sys/smp.h> 46 #include <sys/sysctl.h> 47 48 #include <machine/cpu.h> 49 50 /* Uncomment this to enable logging of critical_enter/exit. */ 51 #if 0 52 #define KTR_CRITICAL KTR_SCHED 53 #else 54 #define KTR_CRITICAL 0 55 #endif 56 57 #ifdef FULL_PREEMPTION 58 #ifndef PREEMPTION 59 #error "The FULL_PREEMPTION option requires the PREEMPTION option" 60 #endif 61 #endif 62 63 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS); 64 65 /* 66 * kern.sched.preemption allows user space to determine if preemption support 67 * is compiled in or not. It is not currently a boot or runtime flag that 68 * can be changed. 69 */ 70 #ifdef PREEMPTION 71 static int kern_sched_preemption = 1; 72 #else 73 static int kern_sched_preemption = 0; 74 #endif 75 SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD, 76 &kern_sched_preemption, 0, "Kernel preemption enabled"); 77 78 /* 79 * Support for scheduler stats exported via kern.sched.stats. All stats may 80 * be reset with kern.sched.stats.reset = 1. Stats may be defined elsewhere 81 * with SCHED_STAT_DEFINE(). 82 */ 83 #ifdef SCHED_STATS 84 SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats"); 85 86 /* Switch reasons from mi_switch(). */ 87 DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]); 88 SCHED_STAT_DEFINE_VAR(uncategorized, 89 &DPCPU_NAME(sched_switch_stats[SWT_NONE]), ""); 90 SCHED_STAT_DEFINE_VAR(preempt, 91 &DPCPU_NAME(sched_switch_stats[SWT_PREEMPT]), ""); 92 SCHED_STAT_DEFINE_VAR(owepreempt, 93 &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), ""); 94 SCHED_STAT_DEFINE_VAR(turnstile, 95 &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), ""); 96 SCHED_STAT_DEFINE_VAR(sleepq, 97 &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), ""); 98 SCHED_STAT_DEFINE_VAR(sleepqtimo, 99 &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQTIMO]), ""); 100 SCHED_STAT_DEFINE_VAR(relinquish, 101 &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), ""); 102 SCHED_STAT_DEFINE_VAR(needresched, 103 &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), ""); 104 SCHED_STAT_DEFINE_VAR(idle, 105 &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), ""); 106 SCHED_STAT_DEFINE_VAR(iwait, 107 &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), ""); 108 SCHED_STAT_DEFINE_VAR(suspend, 109 &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), ""); 110 SCHED_STAT_DEFINE_VAR(remotepreempt, 111 &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), ""); 112 SCHED_STAT_DEFINE_VAR(remotewakeidle, 113 &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), ""); 114 115 static int 116 sysctl_stats_reset(SYSCTL_HANDLER_ARGS) 117 { 118 struct sysctl_oid *p; 119 uintptr_t counter; 120 int error; 121 int val; 122 int i; 123 124 val = 0; 125 error = sysctl_handle_int(oidp, &val, 0, req); 126 if (error != 0 || req->newptr == NULL) 127 return (error); 128 if (val == 0) 129 return (0); 130 /* 131 * Traverse the list of children of _kern_sched_stats and reset each 132 * to 0. Skip the reset entry. 133 */ 134 SLIST_FOREACH(p, oidp->oid_parent, oid_link) { 135 if (p == oidp || p->oid_arg1 == NULL) 136 continue; 137 counter = (uintptr_t)p->oid_arg1; 138 CPU_FOREACH(i) { 139 *(long *)(dpcpu_off[i] + counter) = 0; 140 } 141 } 142 return (0); 143 } 144 145 SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL, 146 0, sysctl_stats_reset, "I", "Reset scheduler statistics"); 147 #endif 148 149 /************************************************************************ 150 * Functions that manipulate runnability from a thread perspective. * 151 ************************************************************************/ 152 /* 153 * Select the thread that will be run next. 154 */ 155 156 static __noinline struct thread * 157 choosethread_panic(struct thread *td) 158 { 159 160 /* 161 * If we are in panic, only allow system threads, 162 * plus the one we are running in, to be run. 163 */ 164 retry: 165 if (((td->td_proc->p_flag & P_SYSTEM) == 0 && 166 (td->td_flags & TDF_INPANIC) == 0)) { 167 /* note that it is no longer on the run queue */ 168 TD_SET_CAN_RUN(td); 169 td = sched_choose(); 170 goto retry; 171 } 172 173 TD_SET_RUNNING(td); 174 return (td); 175 } 176 177 struct thread * 178 choosethread(void) 179 { 180 struct thread *td; 181 182 td = sched_choose(); 183 184 if (__predict_false(panicstr != NULL)) 185 return (choosethread_panic(td)); 186 187 TD_SET_RUNNING(td); 188 return (td); 189 } 190 191 /* 192 * Kernel thread preemption implementation. Critical sections mark 193 * regions of code in which preemptions are not allowed. 194 * 195 * It might seem a good idea to inline critical_enter() but, in order 196 * to prevent instructions reordering by the compiler, a __compiler_membar() 197 * would have to be used here (the same as sched_pin()). The performance 198 * penalty imposed by the membar could, then, produce slower code than 199 * the function call itself, for most cases. 200 */ 201 void 202 critical_enter(void) 203 { 204 struct thread *td; 205 206 td = curthread; 207 td->td_critnest++; 208 CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td, 209 (long)td->td_proc->p_pid, td->td_name, td->td_critnest); 210 } 211 212 static void __noinline 213 critical_exit_preempt(void) 214 { 215 struct thread *td; 216 int flags; 217 218 /* 219 * If td_critnest is 0, it is possible that we are going to get 220 * preempted again before reaching the code below. This happens 221 * rarely and is harmless. However, this means td_owepreempt may 222 * now be unset. 223 */ 224 td = curthread; 225 if (td->td_critnest != 0) 226 return; 227 if (kdb_active) 228 return; 229 230 /* 231 * Microoptimization: we committed to switch, 232 * disable preemption in interrupt handlers 233 * while spinning for the thread lock. 234 */ 235 td->td_critnest = 1; 236 thread_lock(td); 237 td->td_critnest--; 238 flags = SW_INVOL | SW_PREEMPT; 239 if (TD_IS_IDLETHREAD(td)) 240 flags |= SWT_IDLE; 241 else 242 flags |= SWT_OWEPREEMPT; 243 mi_switch(flags, NULL); 244 thread_unlock(td); 245 } 246 247 void 248 critical_exit(void) 249 { 250 struct thread *td; 251 252 td = curthread; 253 KASSERT(td->td_critnest != 0, 254 ("critical_exit: td_critnest == 0")); 255 td->td_critnest--; 256 __compiler_membar(); 257 if (__predict_false(td->td_owepreempt)) 258 critical_exit_preempt(); 259 CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td, 260 (long)td->td_proc->p_pid, td->td_name, td->td_critnest); 261 } 262 263 /************************************************************************ 264 * SYSTEM RUN QUEUE manipulations and tests * 265 ************************************************************************/ 266 /* 267 * Initialize a run structure. 268 */ 269 void 270 runq_init(struct runq *rq) 271 { 272 int i; 273 274 bzero(rq, sizeof *rq); 275 for (i = 0; i < RQ_NQS; i++) 276 TAILQ_INIT(&rq->rq_queues[i]); 277 } 278 279 /* 280 * Clear the status bit of the queue corresponding to priority level pri, 281 * indicating that it is empty. 282 */ 283 static __inline void 284 runq_clrbit(struct runq *rq, int pri) 285 { 286 struct rqbits *rqb; 287 288 rqb = &rq->rq_status; 289 CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d", 290 rqb->rqb_bits[RQB_WORD(pri)], 291 rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri), 292 RQB_BIT(pri), RQB_WORD(pri)); 293 rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri); 294 } 295 296 /* 297 * Find the index of the first non-empty run queue. This is done by 298 * scanning the status bits, a set bit indicates a non-empty queue. 299 */ 300 static __inline int 301 runq_findbit(struct runq *rq) 302 { 303 struct rqbits *rqb; 304 int pri; 305 int i; 306 307 rqb = &rq->rq_status; 308 for (i = 0; i < RQB_LEN; i++) 309 if (rqb->rqb_bits[i]) { 310 pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW); 311 CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d", 312 rqb->rqb_bits[i], i, pri); 313 return (pri); 314 } 315 316 return (-1); 317 } 318 319 static __inline int 320 runq_findbit_from(struct runq *rq, u_char pri) 321 { 322 struct rqbits *rqb; 323 rqb_word_t mask; 324 int i; 325 326 /* 327 * Set the mask for the first word so we ignore priorities before 'pri'. 328 */ 329 mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1)); 330 rqb = &rq->rq_status; 331 again: 332 for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) { 333 mask = rqb->rqb_bits[i] & mask; 334 if (mask == 0) 335 continue; 336 pri = RQB_FFS(mask) + (i << RQB_L2BPW); 337 CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d", 338 mask, i, pri); 339 return (pri); 340 } 341 if (pri == 0) 342 return (-1); 343 /* 344 * Wrap back around to the beginning of the list just once so we 345 * scan the whole thing. 346 */ 347 pri = 0; 348 goto again; 349 } 350 351 /* 352 * Set the status bit of the queue corresponding to priority level pri, 353 * indicating that it is non-empty. 354 */ 355 static __inline void 356 runq_setbit(struct runq *rq, int pri) 357 { 358 struct rqbits *rqb; 359 360 rqb = &rq->rq_status; 361 CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d", 362 rqb->rqb_bits[RQB_WORD(pri)], 363 rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri), 364 RQB_BIT(pri), RQB_WORD(pri)); 365 rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri); 366 } 367 368 /* 369 * Add the thread to the queue specified by its priority, and set the 370 * corresponding status bit. 371 */ 372 void 373 runq_add(struct runq *rq, struct thread *td, int flags) 374 { 375 struct rqhead *rqh; 376 int pri; 377 378 pri = td->td_priority / RQ_PPQ; 379 td->td_rqindex = pri; 380 runq_setbit(rq, pri); 381 rqh = &rq->rq_queues[pri]; 382 CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p", 383 td, td->td_priority, pri, rqh); 384 if (flags & SRQ_PREEMPTED) { 385 TAILQ_INSERT_HEAD(rqh, td, td_runq); 386 } else { 387 TAILQ_INSERT_TAIL(rqh, td, td_runq); 388 } 389 } 390 391 void 392 runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags) 393 { 394 struct rqhead *rqh; 395 396 KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri)); 397 td->td_rqindex = pri; 398 runq_setbit(rq, pri); 399 rqh = &rq->rq_queues[pri]; 400 CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p", 401 td, td->td_priority, pri, rqh); 402 if (flags & SRQ_PREEMPTED) { 403 TAILQ_INSERT_HEAD(rqh, td, td_runq); 404 } else { 405 TAILQ_INSERT_TAIL(rqh, td, td_runq); 406 } 407 } 408 /* 409 * Return true if there are runnable processes of any priority on the run 410 * queue, false otherwise. Has no side effects, does not modify the run 411 * queue structure. 412 */ 413 int 414 runq_check(struct runq *rq) 415 { 416 struct rqbits *rqb; 417 int i; 418 419 rqb = &rq->rq_status; 420 for (i = 0; i < RQB_LEN; i++) 421 if (rqb->rqb_bits[i]) { 422 CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d", 423 rqb->rqb_bits[i], i); 424 return (1); 425 } 426 CTR0(KTR_RUNQ, "runq_check: empty"); 427 428 return (0); 429 } 430 431 /* 432 * Find the highest priority process on the run queue. 433 */ 434 struct thread * 435 runq_choose_fuzz(struct runq *rq, int fuzz) 436 { 437 struct rqhead *rqh; 438 struct thread *td; 439 int pri; 440 441 while ((pri = runq_findbit(rq)) != -1) { 442 rqh = &rq->rq_queues[pri]; 443 /* fuzz == 1 is normal.. 0 or less are ignored */ 444 if (fuzz > 1) { 445 /* 446 * In the first couple of entries, check if 447 * there is one for our CPU as a preference. 448 */ 449 int count = fuzz; 450 int cpu = PCPU_GET(cpuid); 451 struct thread *td2; 452 td2 = td = TAILQ_FIRST(rqh); 453 454 while (count-- && td2) { 455 if (td2->td_lastcpu == cpu) { 456 td = td2; 457 break; 458 } 459 td2 = TAILQ_NEXT(td2, td_runq); 460 } 461 } else 462 td = TAILQ_FIRST(rqh); 463 KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue")); 464 CTR3(KTR_RUNQ, 465 "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh); 466 return (td); 467 } 468 CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri); 469 470 return (NULL); 471 } 472 473 /* 474 * Find the highest priority process on the run queue. 475 */ 476 struct thread * 477 runq_choose(struct runq *rq) 478 { 479 struct rqhead *rqh; 480 struct thread *td; 481 int pri; 482 483 while ((pri = runq_findbit(rq)) != -1) { 484 rqh = &rq->rq_queues[pri]; 485 td = TAILQ_FIRST(rqh); 486 KASSERT(td != NULL, ("runq_choose: no thread on busy queue")); 487 CTR3(KTR_RUNQ, 488 "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh); 489 return (td); 490 } 491 CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri); 492 493 return (NULL); 494 } 495 496 struct thread * 497 runq_choose_from(struct runq *rq, u_char idx) 498 { 499 struct rqhead *rqh; 500 struct thread *td; 501 int pri; 502 503 if ((pri = runq_findbit_from(rq, idx)) != -1) { 504 rqh = &rq->rq_queues[pri]; 505 td = TAILQ_FIRST(rqh); 506 KASSERT(td != NULL, ("runq_choose: no thread on busy queue")); 507 CTR4(KTR_RUNQ, 508 "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p", 509 pri, td, td->td_rqindex, rqh); 510 return (td); 511 } 512 CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri); 513 514 return (NULL); 515 } 516 /* 517 * Remove the thread from the queue specified by its priority, and clear the 518 * corresponding status bit if the queue becomes empty. 519 * Caller must set state afterwards. 520 */ 521 void 522 runq_remove(struct runq *rq, struct thread *td) 523 { 524 525 runq_remove_idx(rq, td, NULL); 526 } 527 528 void 529 runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx) 530 { 531 struct rqhead *rqh; 532 u_char pri; 533 534 KASSERT(td->td_flags & TDF_INMEM, 535 ("runq_remove_idx: thread swapped out")); 536 pri = td->td_rqindex; 537 KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri)); 538 rqh = &rq->rq_queues[pri]; 539 CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p", 540 td, td->td_priority, pri, rqh); 541 TAILQ_REMOVE(rqh, td, td_runq); 542 if (TAILQ_EMPTY(rqh)) { 543 CTR0(KTR_RUNQ, "runq_remove_idx: empty"); 544 runq_clrbit(rq, pri); 545 if (idx != NULL && *idx == pri) 546 *idx = (pri + 1) % RQ_NQS; 547 } 548 } 549