/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M = (N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued at all, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned, we know M was 1 and must
now be 0.  Since the thread is no longer queued, that pointer must be
removed from it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' that thread's KSE or not, i.e. whether it sorts
earlier on the list than that thread or later.  If it sorts earlier, the
KSE is removed from the last assigned thread (which is then no longer
assigned a KSE) and reassigned to the new thread, which is placed earlier
in the list.  The pointer is then backed up to the previous thread (which
may or may not be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.
***/
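/*
 * A minimal userland sketch (not kernel code) of the bookkeeping above:
 * the top M runnable threads hold the M slots, last_assigned marks the
 * lowest-priority assigned thread, and a newly runnable thread that sorts
 * earlier steals the slot of last_assigned.  All names here are
 * illustrative, not part of the kernel API.
 */
#if 0
#include <stdio.h>

struct toy_thread {
        int pri;                        /* lower number == higher priority */
        struct toy_thread *next;        /* priority-ordered ksegrp run queue */
        int assigned;                   /* holds one of the M slots */
};

/* Re-derive which threads hold the M slots after any queue change. */
static struct toy_thread *
toy_assign(struct toy_thread *head, int m)
{
        struct toy_thread *td, *last = NULL;

        for (td = head; td != NULL; td = td->next) {
                td->assigned = (m > 0);
                if (m-- > 0)
                        last = td;      /* becomes last_assigned */
        }
        return (last);
}

int
main(void)
{
        struct toy_thread c = { 140, NULL, 0 };
        struct toy_thread b = { 130, &c, 0 };
        struct toy_thread d = { 125, NULL, 0 };
        struct toy_thread a = { 120, &b, 0 };
        struct toy_thread *last;

        /* M == 2: a and b hold the slots; last_assigned is b. */
        last = toy_assign(&a, 2);
        printf("last_assigned pri=%d\n", last->pri);    /* prints 130 */

        /*
         * A newly runnable thread d at pri 125 sorts before b, so it
         * steals b's slot; re-deriving the assignment models the steal
         * and backs last_assigned up to d.
         */
        d.next = &b;
        a.next = &d;
        last = toy_assign(&a, 2);
        printf("last_assigned pri=%d\n", last->pri);    /* prints 125 */
        return (0);
}
#endif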
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.    *
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
        struct kse *ke;
        struct thread *td;
        struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
        if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
                /* Shutting down, run idlethread on APs */
                td = PCPU_GET(idlethread);
                ke = td->td_kse;
                CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
                ke->ke_flags |= KEF_DIDRUN;
                TD_SET_RUNNING(td);
                return (td);
        }
#endif

retry:
        ke = sched_choose();
        if (ke) {
                td = ke->ke_thread;
                KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
                kg = ke->ke_ksegrp;
                if (td->td_proc->p_flag & P_HADTHREADS) {
                        if (kg->kg_last_assigned == td) {
                                kg->kg_last_assigned = TAILQ_PREV(td,
                                    threadqueue, td_runq);
                        }
                        TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
                }
                CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
                    td, td->td_priority);
        } else {
                /* Simulate runq_choose() having returned the idle thread */
                td = PCPU_GET(idlethread);
                ke = td->td_kse;
                CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
        }
        ke->ke_flags |= KEF_DIDRUN;

        /*
         * If we are in panic, only allow system threads,
         * plus the one we are running in, to be run.
         */
        if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
            (td->td_flags & TDF_INPANIC) == 0)) {
                /* note that it is no longer on the run queue */
                TD_SET_CAN_RUN(td);
                goto retry;
        }

        TD_SET_RUNNING(td);
        return (td);
}
/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        while (kg->kg_avail_opennings > 0) {
                /*
                 * Find the first unassigned thread
                 */
                if ((td = kg->kg_last_assigned) != NULL)
                        td = TAILQ_NEXT(td, td_runq);
                else
                        td = TAILQ_FIRST(&kg->kg_runq);

                /*
                 * If we found one, send it to the system scheduler.
                 */
                if (td) {
                        kg->kg_last_assigned = td;
                        sched_add(td, SRQ_YIELDING);
                        CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
                } else {
                        /* no threads to use up the slots. quit now */
                        break;
                }
        }
}

#ifdef SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
        struct thread *td2, *td3;
        struct ksegrp *kg;
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
        kg = td->td_ksegrp;
        ke = td->td_kse;
        CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
        TD_SET_CAN_RUN(td);
        /*
         * If it is not a threaded process, take the shortcut.
         */
        if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
                /* remove from sys run queue and free up a slot */
                sched_rem(td);
                ke->ke_state = KES_THREAD;
                return;
        }
        td3 = TAILQ_PREV(td, threadqueue, td_runq);
        TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
        if (ke->ke_state == KES_ONRUNQ) {
                /*
                 * This thread has been assigned to the system run queue.
                 * We need to dissociate it and try to assign the
                 * KSE to the next available thread.  Then, we should
                 * see if we need to move the KSE in the run queues.
                 */
                sched_rem(td);
                ke->ke_state = KES_THREAD;
                td2 = kg->kg_last_assigned;
                KASSERT((td2 != NULL), ("last assigned has wrong value"));
                if (td2 == td)
                        kg->kg_last_assigned = td3;
                /* slot_fill(kg); */ /* will replace it with another */
        }
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
        struct ksegrp *kg;
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

        ke = td->td_kse;
        CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
        /*
         * If it is not a threaded process, take the shortcut.
         */
        if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
                /* We only care about the kse in the run queue. */
                td->td_priority = newpri;
                if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
                        sched_rem(td);
                        sched_add(td, SRQ_BORING);
                }
                return;
        }

        /* It is a threaded process */
        kg = td->td_ksegrp;
        if (ke->ke_state == KES_ONRUNQ) {
                if (kg->kg_last_assigned == td) {
                        kg->kg_last_assigned =
                            TAILQ_PREV(td, threadqueue, td_runq);
                }
                sched_rem(td);
        }
        TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
        TD_SET_CAN_RUN(td);
        td->td_priority = newpri;
        setrunqueue(td, SRQ_BORING);
}
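/*
 * The shortcut above only requeues when the new priority maps to a
 * different system run queue.  A minimal sketch of that mapping, assuming
 * the usual values of RQ_NQS == 64 queues and RQ_PPQ == 4 priorities per
 * queue (check <sys/runq.h> for the values actually in effect):
 */
#if 0
#include <assert.h>

#define TOY_RQ_PPQ      4       /* priorities per queue, assumed */

int
main(void)
{
        /* Priorities 128..131 all share queue index 32 ... */
        assert(128 / TOY_RQ_PPQ == 32 && 131 / TOY_RQ_PPQ == 32);
        /* ... so changing 128 -> 131 needs no sched_rem()/sched_add(), */
        /* while 128 -> 132 moves the thread to queue index 33. */
        assert(132 / TOY_RQ_PPQ == 33);
        return (0);
}
#endif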
/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot.  It determines if a thread from the same ksegrp
 * should be preempted.  If so, it tries to switch threads
 * if the thread is on the same cpu, or notifies another cpu that
 * it should switch threads.
 */
static void
maybe_preempt_in_ksegrp(struct thread *td)
#if !defined(SMP)
{
        struct thread *running_thread;

#ifndef FULL_PREEMPTION
        int pri;

        pri = td->td_priority;
        if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
                return;
#endif
        mtx_assert(&sched_lock, MA_OWNED);
        running_thread = curthread;

        if (running_thread->td_ksegrp != td->td_ksegrp)
                return;

        if (td->td_priority > running_thread->td_priority)
                return;
#ifdef PREEMPTION
        if (running_thread->td_critnest > 1)
                running_thread->td_pflags |= TDP_OWEPREEMPT;
        else
                mi_switch(SW_INVOL, NULL);
#else
        running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
        return;
}

#else /* SMP */
{
        struct thread *running_thread;
        int worst_pri;
        struct ksegrp *kg;
        cpumask_t cpumask, dontuse;
        struct pcpu *pc;
        struct pcpu *best_pcpu;
        struct thread *cputhread;

#ifndef FULL_PREEMPTION
        int pri;

        pri = td->td_priority;
        if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
                return;
#endif

        mtx_assert(&sched_lock, MA_OWNED);

        running_thread = curthread;

#if !defined(KSEG_PREEMPT_BEST_CPU)
        if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
                kg = td->td_ksegrp;

                /* if someone is ahead of this thread, wait our turn */
                if (td != TAILQ_FIRST(&kg->kg_runq))
                        return;

                worst_pri = td->td_priority;
                best_pcpu = NULL;
                dontuse = stopped_cpus | idle_cpus_mask;

                /*
                 * Find the cpu running the worst-priority thread from the
                 * same ksegrp; if several qualify, prefer the new thread's
                 * last-run cpu first, and then the current cpu.
                 */
                SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
                        cpumask = pc->pc_cpumask;
                        cputhread = pc->pc_curthread;

                        if ((cpumask & dontuse) ||
                            cputhread->td_ksegrp != kg)
                                continue;

                        if (cputhread->td_priority > worst_pri) {
                                worst_pri = cputhread->td_priority;
                                best_pcpu = pc;
                                continue;
                        }

                        if (cputhread->td_priority == worst_pri &&
                            best_pcpu != NULL &&
                            (td->td_lastcpu == pc->pc_cpuid ||
                            (PCPU_GET(cpumask) == cpumask &&
                            td->td_lastcpu != best_pcpu->pc_cpuid)))
                                best_pcpu = pc;
                }

                /* Check if we need to preempt someone */
                if (best_pcpu == NULL)
                        return;

                if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
                        best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
                        ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
                        return;
                }
#if !defined(KSEG_PREEMPT_BEST_CPU)
        }
#endif

        if (td->td_priority > running_thread->td_priority)
                return;
#ifdef PREEMPTION
        if (running_thread->td_critnest > 1)
                running_thread->td_pflags |= TDP_OWEPREEMPT;
        else
                mi_switch(SW_INVOL, NULL);
#else
        running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
        return;
}
#endif /* !SMP */
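/*
 * The decision in both variants above reduces to a small predicate; a
 * hedged sketch with illustrative names (the real code additionally
 * defers via TDP_OWEPREEMPT when td_critnest > 1, and on SMP may send
 * an IPI_AST to a better victim cpu instead of switching locally):
 */
#if 0
/* Lower numeric priority is better; ties go to the newly runnable thread. */
static int
toy_should_preempt_in_group(int newpri, int curpri, int same_ksegrp)
{
        return (same_ksegrp && newpri <= curpri);
}
#endif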
int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
        struct ksegrp *kg;
        struct thread *td2;
        struct thread *tda;

        CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
            td, td->td_ksegrp, td->td_proc->p_pid);
        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((td->td_inhibitors == 0),
            ("setrunqueue: trying to run inhibited thread"));
        KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
            ("setrunqueue: bad thread state"));
        TD_SET_RUNQ(td);
        kg = td->td_ksegrp;
        if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
                /*
                 * Common path optimisation: Only one of everything
                 * and the KSE is always already attached.
                 * Totally ignore the ksegrp run queue.
                 */
                if (kg->kg_avail_opennings != 1) {
                        if (limitcount < 1) {
                                limitcount++;
                                printf("pid %d: corrected slot count (%d->1)\n",
                                    td->td_proc->p_pid, kg->kg_avail_opennings);
                        }
                        kg->kg_avail_opennings = 1;
                }
                sched_add(td, flags);
                return;
        }

        /*
         * If the concurrency has been reduced, and we would go in the
         * assigned section, then keep removing entries from the
         * system run queue, until we are not in that section
         * or there is room for us to be put in that section.
         * What we MUST avoid is the case where threads of lower
         * priority than the new one are scheduled while the new one
         * cannot be scheduled itself.  That would lead to a
         * non-contiguous set of scheduled threads, and everything
         * would break.
         */
        tda = kg->kg_last_assigned;
        while ((kg->kg_avail_opennings <= 0) &&
            (tda && (tda->td_priority > td->td_priority))) {
                /*
                 * None free, but there is one we can commandeer.
                 */
                CTR2(KTR_RUNQ,
                    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
                sched_rem(tda);
                tda = kg->kg_last_assigned =
                    TAILQ_PREV(tda, threadqueue, td_runq);
        }

        /*
         * Add the thread to the ksegrp's run queue at
         * the appropriate place.
         */
        TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
                if (td2->td_priority > td->td_priority) {
                        TAILQ_INSERT_BEFORE(td2, td, td_runq);
                        break;
                }
        }
        if (td2 == NULL) {
                /* We ran off the end of the TAILQ or it was empty. */
                TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
        }

        /*
         * If we have a slot to use, then put the thread on the system
         * run queue and if needed, readjust the last_assigned pointer.
         * It may be that we need to schedule something anyhow
         * even if the available slots are -ve so that
         * all the items < last_assigned are scheduled.
         */
        if (kg->kg_avail_opennings > 0) {
                if (tda == NULL) {
                        /*
                         * No pre-existing last assigned so whoever is first
                         * gets the slot.. (maybe us)
                         */
                        td2 = TAILQ_FIRST(&kg->kg_runq);
                        kg->kg_last_assigned = td2;
                } else if (tda->td_priority > td->td_priority) {
                        td2 = td;
                } else {
                        /*
                         * We are past last_assigned, so
                         * give the next slot to whatever is next,
                         * which may or may not be us.
                         */
                        td2 = TAILQ_NEXT(tda, td_runq);
                        kg->kg_last_assigned = td2;
                }
                sched_add(td2, flags);
        } else {
                CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
                    td, td->td_ksegrp, td->td_proc->p_pid);
                if ((flags & SRQ_YIELDING) == 0)
                        maybe_preempt_in_ksegrp(td);
        }
}
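/*
 * A worked example of the commandeering loop above, assuming a ksegrp
 * with no free slots and threads A (pri 120, assigned, == last_assigned)
 * and B (pri 140, unassigned) already queued, and assuming sched_rem()
 * releases the victim's slot:
 *
 *   setrunqueue(C) with C at pri 110 finds kg_avail_opennings <= 0 and
 *   tda == A with A->td_priority (120) > 110, so it sched_rem()s A,
 *   backs last_assigned up (to NULL here), queues C before A, and the
 *   freed slot then goes to C, the new queue head.  The assigned
 *   threads therefore always form a contiguous prefix of the queue.
 */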
/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
        struct thread *td;

        td = curthread;
        if (td->td_critnest == 0)
                cpu_critical_enter(td);
        td->td_critnest++;
        CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
            (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
        struct thread *td;

        td = curthread;
        KASSERT(td->td_critnest != 0,
            ("critical_exit: td_critnest == 0"));
        if (td->td_critnest == 1) {
                if (td->td_pflags & TDP_WAKEPROC0) {
                        td->td_pflags &= ~TDP_WAKEPROC0;
                        wakeup(&proc0);
                }
#ifdef PREEMPTION
                mtx_assert(&sched_lock, MA_NOTOWNED);
                if (td->td_pflags & TDP_OWEPREEMPT) {
                        mtx_lock_spin(&sched_lock);
                        mi_switch(SW_INVOL, NULL);
                        mtx_unlock_spin(&sched_lock);
                }
#endif
                td->td_critnest = 0;
                cpu_critical_exit(td);
        } else {
                td->td_critnest--;
        }
        CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
            (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}
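/*
 * Typical usage: sections nest, a preemption that would occur inside the
 * section is deferred (the would-be preempter sets TDP_OWEPREEMPT), and
 * only the outermost critical_exit() performs the deferred switch.  A
 * minimal sketch with an illustrative function name:
 */
#if 0
static void
toy_touch_pcpu_state(void)
{
        critical_enter();       /* td_critnest: 0 -> 1 */
        critical_enter();       /* nested: 1 -> 2 */
        /* ... per-cpu work that must not be preempted ... */
        critical_exit();        /* 2 -> 1, still in the section */
        critical_exit();        /* 1 -> 0, deferred switch may run here */
}
#endif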
/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines if we should immediately preempt to the new thread.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
        struct thread *ctd;
        int cpri, pri;
#endif

        mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
        /*
         * The new thread should not preempt the current thread if any of the
         * following conditions are true:
         *
         *  - The current thread has a higher (numerically lower) or
         *    equivalent priority.  Note that this prevents curthread from
         *    trying to preempt to itself.
         *  - It is too early in the boot for context switches (cold is set).
         *  - The current thread has an inhibitor set or is in the process of
         *    exiting.  In this case, the current thread is about to switch
         *    out anyways, so there's no point in preempting.  If we did,
         *    the current thread would not be properly resumed as well, so
         *    just avoid that whole landmine.
         *  - The new thread's priority is not a realtime priority and
         *    the current thread's priority is not an idle priority and
         *    FULL_PREEMPTION is disabled.
         *
         * If all of these conditions are false, but the current thread is in
         * a nested critical section, then we have to defer the preemption
         * until we exit the critical section.  Otherwise, switch immediately
         * to the new thread.
         */
        ctd = curthread;
        KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
            ("thread has no (or wrong) sched-private part."));
        KASSERT((td->td_inhibitors == 0),
            ("maybe_preempt: trying to run inhibited thread"));
        pri = td->td_priority;
        cpri = ctd->td_priority;
        if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
            td->td_kse->ke_state != KES_THREAD)
                return (0);
#ifndef FULL_PREEMPTION
        if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
            !(cpri >= PRI_MIN_IDLE))
                return (0);
#endif
        if (ctd->td_critnest > 1) {
                CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
                    ctd->td_critnest);
                ctd->td_pflags |= TDP_OWEPREEMPT;
                return (0);
        }

        /*
         * Thread is runnable but not yet put on system run queue.
         */
        MPASS(TD_ON_RUNQ(td));
        MPASS(td->td_sched->ke_state != KES_ONRUNQ);
        if (td->td_proc->p_flag & P_HADTHREADS) {
                /*
                 * If this is a threaded process we actually ARE on the
                 * ksegrp run queue so take it off that first.
                 * Also undo any damage done to the last_assigned pointer.
                 * XXX Fix setrunqueue so this isn't needed.
                 */
                struct ksegrp *kg;

                kg = td->td_ksegrp;
                if (kg->kg_last_assigned == td)
                        kg->kg_last_assigned =
                            TAILQ_PREV(td, threadqueue, td_runq);
                TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
        }

        TD_SET_RUNNING(td);
        CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
            td->td_proc->p_pid, td->td_proc->p_comm);
        mi_switch(SW_INVOL|SW_PREEMPT, td);
        return (1);
#else
        return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
        printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests                             *
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
        int i;

        bzero(rq, sizeof *rq);
        for (i = 0; i < RQ_NQS; i++)
                TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
        struct rqbits *rqb;

        rqb = &rq->rq_status;
        CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
            rqb->rqb_bits[RQB_WORD(pri)],
            rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
            RQB_BIT(pri), RQB_WORD(pri));
        rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
        struct rqbits *rqb;
        int pri;
        int i;

        rqb = &rq->rq_status;
        for (i = 0; i < RQB_LEN; i++)
                if (rqb->rqb_bits[i]) {
                        pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
                        CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
                            rqb->rqb_bits[i], i, pri);
                        return (pri);
                }

        return (-1);
}
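/*
 * A minimal sketch of the status-bit scheme, assuming 64 queues held in
 * two 32-bit words (RQB_BPW == 32, RQB_LEN == 2, matching the CTASSERT
 * near the top of the file).  Queue index 37 lives in word 37/32 == 1,
 * bit 37%32 == 5; the hypothetical toy_findbit() mirrors runq_findbit():
 */
#if 0
#include <assert.h>
#include <strings.h>    /* ffs() */

#define TOY_BPW 32      /* bits per status word, assumed */

static int
toy_findbit(unsigned int bits[2])
{
        int i;

        for (i = 0; i < 2; i++)
                if (bits[i])
                        /* ffs() is 1-based, hence the -1 */
                        return (ffs(bits[i]) - 1 + i * TOY_BPW);
        return (-1);
}

int
main(void)
{
        unsigned int bits[2] = { 0, 0 };

        bits[37 / TOY_BPW] |= 1u << (37 % TOY_BPW);     /* mark queue 37 */
        bits[50 / TOY_BPW] |= 1u << (50 % TOY_BPW);     /* mark queue 50 */
        assert(toy_findbit(bits) == 37);        /* lowest index wins */
        return (0);
}
#endif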
/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
        struct rqbits *rqb;

        rqb = &rq->rq_status;
        CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
            rqb->rqb_bits[RQB_WORD(pri)],
            rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
            RQB_BIT(pri), RQB_WORD(pri));
        rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
        struct rqhead *rqh;
        int pri;

        pri = ke->ke_thread->td_priority / RQ_PPQ;
        ke->ke_rqindex = pri;
        runq_setbit(rq, pri);
        rqh = &rq->rq_queues[pri];
        CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
            ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
        if (flags & SRQ_PREEMPTED) {
                TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
        } else {
                TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
        }
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
        struct rqbits *rqb;
        int i;

        rqb = &rq->rq_status;
        for (i = 0; i < RQB_LEN; i++)
                if (rqb->rqb_bits[i]) {
                        CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
                            rqb->rqb_bits[i], i);
                        return (1);
                }
        CTR0(KTR_RUNQ, "runq_check: empty");

        return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
        struct rqhead *rqh;
        struct kse *ke;
        int pri;

        mtx_assert(&sched_lock, MA_OWNED);
        while ((pri = runq_findbit(rq)) != -1) {
                rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
                /* fuzz == 1 is normal.. 0 or less are ignored */
                if (runq_fuzz > 1) {
                        /*
                         * In the first couple of entries, check if
                         * there is one for our CPU as a preference.
                         */
                        int count = runq_fuzz;
                        int cpu = PCPU_GET(cpuid);
                        struct kse *ke2;

                        ke2 = ke = TAILQ_FIRST(rqh);
                        while (count-- && ke2) {
                                if (ke2->ke_thread->td_lastcpu == cpu) {
                                        ke = ke2;
                                        break;
                                }
                                ke2 = TAILQ_NEXT(ke2, ke_procq);
                        }
                } else
#endif
                        ke = TAILQ_FIRST(rqh);
                KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
                CTR3(KTR_RUNQ,
                    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
                return (ke);
        }
        CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

        return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
        struct rqhead *rqh;
        int pri;

        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("runq_remove: process swapped out"));
        pri = ke->ke_rqindex;
        rqh = &rq->rq_queues[pri];
        CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
            ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
        KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
        TAILQ_REMOVE(rqh, ke, ke_procq);
        if (TAILQ_EMPTY(rqh)) {
                CTR0(KTR_RUNQ, "runq_remove: empty");
                runq_clrbit(rq, pri);
        }
}
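/*
 * The expected call pattern for the queue primitives above, shown as a
 * hedged sketch with an illustrative function name (sched_lock handling
 * and ke_state bookkeeping are the caller's job, per the comments on
 * each function):
 */
#if 0
static struct kse *
toy_runq_cycle(struct runq *rq, struct kse *ke)
{
        struct kse *best;

        runq_add(rq, ke, SRQ_BORING);   /* queue at td_priority / RQ_PPQ */
        if (runq_check(rq)) {
                best = runq_choose(rq); /* highest priority, may be 'ke' */
                runq_remove(rq, best);  /* caller then sets ke_state */
                return (best);
        }
        return (NULL);
}
#endif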
/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

        /* This can go in sched_fork */
        sched_init_concurrency(kg);
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
        struct td_sched *ke;

        ke = (struct td_sched *)(td + 1);
        bzero(ke, sizeof(*ke));
        td->td_sched = ke;
        ke->ke_thread = td;
        ke->ke_state = KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline", before the ksegrp is attached to the world,
 * in which case schedlock is not needed.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

        CTR1(KTR_RUNQ, "kg %p init slots and concurrency to 1", kg);
        kg->kg_concurrency = 1;
        kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

        CTR4(KTR_RUNQ, "kg %p set concurrency to %d, slots %d -> %d",
            kg,
            concurrency,
            kg->kg_avail_opennings,
            kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
        kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
        kg->kg_concurrency = concurrency;
}

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which thread_exit() calls
 * only for threads exiting without the rest of the process exiting,
 * because sched_exit() also calls it and we wouldn't want to call it
 * twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

        SLOT_RELEASE(td->td_ksegrp);
        slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */
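/*
 * The slot arithmetic in sched_set_concurrency() keeps kg_avail_opennings
 * consistent with slots already in use.  A minimal worked example, with
 * hypothetical local variables standing in for the kg fields:
 */
#if 0
#include <assert.h>

int
main(void)
{
        int concurrency = 3, avail_opennings = 1;       /* 2 slots in use */
        int new_concurrency = 5;

        avail_opennings += new_concurrency - concurrency;
        concurrency = new_concurrency;
        assert(avail_opennings == 3);   /* the 2 busy slots stay busy */
        return (0);
}
#endif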