/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued that pointer must be
removed from it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, and therefore that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not.. i.e. is it 'earlier'
on the list than that thread or later.. If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
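
For example (an illustrative sketch of what setrunqueue() does in this
case, with made-up priorities): say kg_last_assigned points at a thread
queued at priority 130 and a new thread td becomes runnable at priority
120. The new thread sorts earlier in the list, so in essence:

	tda = kg->kg_last_assigned;
	sched_rem(tda);                   (take the KSE/slot back from tda)
	kg->kg_last_assigned = TAILQ_PREV(tda, threadqueue, td_runq);
	...
	sched_add(td, flags);             (and hand it to the new thread)
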
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on AP's */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
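	 * Anything else chosen by sched_choose() is flipped back to the
	 * "can run" state (it is no longer on a run queue) and we retry
	 * until an acceptable thread comes up.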
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_YIELDING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* no threads to use up the slots. quit now */
			break;
		}
	}
}

#ifdef SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from sys run queue and free up a slot */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */	/* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
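		/*
		 * If the new priority maps into the same run queue bucket
		 * (priorities are grouped RQ_PPQ per queue) the kse can stay
		 * where it is; otherwise move it to the right queue.
		 */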
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot.  It determines if a thread from the same ksegrp
 * should be preempted.  If so, it tries to switch threads
 * if the thread is on the same cpu or notifies another cpu that
 * it should switch threads.
 */

static void
maybe_preempt_in_ksegrp(struct thread *td)
#if !defined(SMP)
{
	struct thread *running_thread;

#ifndef FULL_PREEMPTION
	int pri;
	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif
	mtx_assert(&sched_lock, MA_OWNED);
	running_thread = curthread;

	if (running_thread->td_ksegrp != td->td_ksegrp)
		return;

	if (td->td_priority > running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_pflags |= TDP_OWEPREEMPT;
	else
		mi_switch(SW_INVOL, NULL);

#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}

#else /* SMP */
{
	struct thread *running_thread;
	int worst_pri;
	struct ksegrp *kg;
	cpumask_t cpumask, dontuse;
	struct pcpu *pc;
	struct pcpu *best_pcpu;
	struct thread *cputhread;

#ifndef FULL_PREEMPTION
	int pri;
	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif

	mtx_assert(&sched_lock, MA_OWNED);

	running_thread = curthread;

#if !defined(KSEG_PEEMPT_BEST_CPU)
	if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
		kg = td->td_ksegrp;

		/* if someone is ahead of this thread, wait our turn */
		if (td != TAILQ_FIRST(&kg->kg_runq))
			return;

		worst_pri = td->td_priority;
		best_pcpu = NULL;
		dontuse = stopped_cpus | idle_cpus_mask;

		/*
		 * Find the cpu with the worst priority that runs a thread
		 * from the same ksegrp - if multiple exist, prefer first the
		 * cpu the thread last ran on and then the current cpu.
		 */

		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			cpumask = pc->pc_cpumask;
			cputhread = pc->pc_curthread;

			if ((cpumask & dontuse) ||
			    cputhread->td_ksegrp != kg)
				continue;

			if (cputhread->td_priority > worst_pri) {
				worst_pri = cputhread->td_priority;
				best_pcpu = pc;
				continue;
			}

			if (cputhread->td_priority == worst_pri &&
			    best_pcpu != NULL &&
			    (td->td_lastcpu == pc->pc_cpuid ||
				(PCPU_GET(cpumask) == cpumask &&
				    td->td_lastcpu != best_pcpu->pc_cpuid)))
				best_pcpu = pc;
		}

		/* Check if we need to preempt someone */
		if (best_pcpu == NULL)
			return;

		if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
			best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
			ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
			return;
		}
#if !defined(KSEG_PEEMPT_BEST_CPU)
	}
#endif

	if (td->td_priority > running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_pflags |= TDP_OWEPREEMPT;
	else
		mi_switch(SW_INVOL, NULL);

#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}
#endif /* !SMP */


int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);

			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has been reduced, and we would go in the
	 * assigned section, then keep removing entries from the
	 * system run queue, until we are not in that section
	 * or there is room for us to be put in that section.
	 * What we MUST avoid is the case where threads of lower
	 * priority than the new one are scheduled, but the new one can not
	 * be scheduled itself.  That would lead to a non-contiguous set
	 * of scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow
	 * even if the available slots are negative so that
	 * all the items < last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the slot.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
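			 * last_assigned is advanced to that thread just
			 * below, so the assigned set stays contiguous from
			 * the head of the queue.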
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
		if ((flags & SRQ_YIELDING) == 0)
			maybe_preempt_in_ksegrp(td);
	}
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
		if (td->td_pflags & TDP_WAKEPROC0) {
			td->td_pflags &= ~TDP_WAKEPROC0;
			wakeup(&proc0);
		}
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines if the current thread should be preempted in favor of the new
 * thread immediately.  If so, it switches to the new thread and eventually
 * returns true.  If not, it returns false so that the caller may place the
 * thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
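	 * (The deferred case works by setting TDP_OWEPREEMPT; critical_exit()
	 * above then performs the postponed switch once td_critnest drops
	 * back to zero.)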
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicates a non-empty queue.
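 * The word index supplies the high bits of the priority and RQB_FFS()
 * yields the lowest set bit within that word, so the lowest-numbered
 * (i.e. highest priority) non-empty queue is returned first.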
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
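 * The queue index comes from ke_rqindex, which runq_add() recorded when
 * the KSE was enqueued.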
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *) (td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched = ke;
	ke->ke_thread = td;
	ke->ke_oncpu = NOCPU;
	ke->ke_state = KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline"..before the ksegrp is attached to the world
 * and thus wouldn't need schedlock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ, "kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ, "kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads exiting without the rest of the process also
 * exiting, because it is also called from sched_exit() and we wouldn't
 * want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */