/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
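
/*
 * For illustration (the RQB_* values are machine-dependent; the queue count
 * is the stock definition): with RQ_NQS == 64 queues and 64-bit status
 * words, RQB_BPW is 64 and RQB_LEN is 1, so 64 * 1 == 64; with 32-bit
 * words, RQB_BPW is 32 and RQB_LEN is 2.  The assertion above guards
 * exactly this relation between the bitmap geometry and the queue count.
 */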

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");

/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "switch stats");

/* Switch reasons from mi_switch(9). */
DPCPU_DEFINE(long, sched_switch_stats[SWT_COUNT]);
SCHED_STAT_DEFINE_VAR(owepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_OWEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(turnstile,
    &DPCPU_NAME(sched_switch_stats[SWT_TURNSTILE]), "");
SCHED_STAT_DEFINE_VAR(sleepq,
    &DPCPU_NAME(sched_switch_stats[SWT_SLEEPQ]), "");
SCHED_STAT_DEFINE_VAR(relinquish,
    &DPCPU_NAME(sched_switch_stats[SWT_RELINQUISH]), "");
SCHED_STAT_DEFINE_VAR(needresched,
    &DPCPU_NAME(sched_switch_stats[SWT_NEEDRESCHED]), "");
SCHED_STAT_DEFINE_VAR(idle,
    &DPCPU_NAME(sched_switch_stats[SWT_IDLE]), "");
SCHED_STAT_DEFINE_VAR(iwait,
    &DPCPU_NAME(sched_switch_stats[SWT_IWAIT]), "");
SCHED_STAT_DEFINE_VAR(suspend,
    &DPCPU_NAME(sched_switch_stats[SWT_SUSPEND]), "");
SCHED_STAT_DEFINE_VAR(remotepreempt,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEPREEMPT]), "");
SCHED_STAT_DEFINE_VAR(remotewakeidle,
    &DPCPU_NAME(sched_switch_stats[SWT_REMOTEWAKEIDLE]), "");
SCHED_STAT_DEFINE_VAR(bind,
    &DPCPU_NAME(sched_switch_stats[SWT_BIND]), "");

static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	uintptr_t counter;
	int error;
	int val;
	int i;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	RB_FOREACH(p, sysctl_oid_list, oidp->oid_parent) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		counter = (uintptr_t)p->oid_arg1;
		CPU_FOREACH(i) {
			*(long *)(dpcpu_off[i] + counter) = 0;
		}
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_stats_reset, "I",
    "Reset scheduler statistics");
#endif

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */

static __noinline struct thread *
choosethread_panic(struct thread *td)
{

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
retry:
	if (((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		td = sched_choose();
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

struct thread *
choosethread(void)
{
	struct thread *td;

	td = sched_choose();

	if (KERNEL_PANICKED())
		return (choosethread_panic(td));

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemption is not allowed.
 *
 * It might seem a good idea to inline critical_enter(), but in order
 * to prevent instruction reordering by the compiler, a __compiler_membar()
 * would have to be used here (as is done for sched_pin()).  The performance
 * penalty imposed by the membar could then produce slower code than
 * the function call itself in most cases.
 */
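
/*
 * Illustrative usage (a sketch, not code from this file): code that must
 * not be preempted or migrated while it works on per-CPU state brackets
 * the access with a critical section:
 *
 *	critical_enter();
 *	...touch PCPU data; no preemption can occur here...
 *	critical_exit();
 *
 * The inline critical_exit() may in turn call critical_exit_preempt()
 * below to perform a switch that became due while the section was held.
 */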
void
critical_enter_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_enter();
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

void __noinline
critical_exit_preempt(void)
{
	struct thread *td;
	int flags;

	/*
	 * If td_critnest is 0, it is possible that we are going to get
	 * preempted again before reaching the code below.  This happens
	 * rarely and is harmless.  However, this means td_owepreempt may
	 * now be unset.
	 */
	td = curthread;
	if (td->td_critnest != 0)
		return;
	if (kdb_active)
		return;

	/*
	 * Microoptimization: we committed to switch,
	 * disable preemption in interrupt handlers
	 * while spinning for the thread lock.
	 */
	td->td_critnest = 1;
	thread_lock(td);
	td->td_critnest--;
	flags = SW_INVOL | SW_PREEMPT;
	if (TD_IS_IDLETHREAD(td))
		flags |= SWT_IDLE;
	else
		flags |= SWT_OWEPREEMPT;
	mi_switch(flags);
}

void
critical_exit_KBI(void)
{
#ifdef KTR
	struct thread *td = curthread;
#endif
	critical_exit();
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run queue structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
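
/*
 * Worked example of the index arithmetic above (a sketch; the RQB_*
 * constants are machine-dependent, and RQB_FFS() is assumed to return the
 * zero-based index of the lowest set bit, as the machine headers define
 * it): with 32-bit status words RQB_L2BPW is 5, so if word 0 is empty and
 * the lowest set bit of word 1 is bit 3, runq_findbit() reports
 * pri = 3 + (1 << 5) = 35, i.e. queue 35 is the first non-empty one.
 */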

static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities
	 * before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}
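
/*
 * Worked example for runq_add() above (a sketch; RQ_PPQ is 4 in the stock
 * tree): a thread at td_priority 130 is queued at index 130 / 4 == 32, and
 * priorities 128 through 131 all share that queue, so the run queues are
 * coarser than the raw priority space.
 */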

/*
 * Return true if there are runnable threads of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority thread on the run queue; when fuzz > 1, prefer,
 * among the first few threads of that queue, one that last ran on the
 * current CPU.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal; 0 or less is ignored. */
		if (fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;
			td2 = td = TAILQ_FIRST(rqh);

			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Find the highest priority thread on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}

struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL,
		    ("runq_choose_from: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}

void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
	    ("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}
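
/*
 * Putting the primitives together (an illustrative sketch, not code from
 * this file; sched_4bsd and sched_ule are the real consumers and hold the
 * appropriate locks):
 *
 *	runq_add(rq, td, 0);		add td at its current priority
 *	td = runq_choose(rq);		pick the highest-priority thread
 *	if (td != NULL)
 *		runq_remove(rq, td);	dequeue it; the caller sets its state
 */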