/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned we know M was 1 and must
now be 0.  Since the thread is no longer queued, that pointer must be
removed from it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, there are STILL no more KSEs available; the next thread in
the ksegrp list will not have a KSE to assign to it, so we can show that
the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not, i.e. whether it is 'earlier'
on the list than that thread or later.  If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available, and if there
are queued threads that are not assigned KSEs, the highest-priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...             (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest-priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.
***/

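/*
 * A worked example of the scheme above (illustrative only; the priority
 * numbers are invented):  a KSEGROUP with concurrency 2 has three runnable
 * threads at priorities 100, 110 and 120 and no KSE currently running.
 * Its two KSEs sit on the system run queue at priorities 100 and 110 and
 * kg_last_assigned points at the priority 110 thread; the priority 120
 * thread waits on the KSEGROUP queue with no KSE.  If a thread at
 * priority 105 now becomes runnable, it is 'earlier' than the last
 * assigned thread, so the KSE is taken from the 110 thread, requeued at
 * 105, and kg_last_assigned is backed up to the 105 thread.
 */
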
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_YIELDING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* no threads to use up the slots. quit now */
			break;
		}
	}
}

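/*
 * For example (illustrative), if kg_avail_opennings is 1 when slot_fill()
 * runs and two unassigned threads are queued after kg_last_assigned, it
 * hands the first of them to sched_add(), advances kg_last_assigned to it,
 * and then stops because no openings remain.
 */
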
#ifdef SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from sys run queue and free up a slot */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

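/*
 * In the unthreaded shortcut above only the queue index matters: with the
 * stock RQ_PPQ of 4 from <sys/runq.h>, changing a thread's priority from
 * 130 to 129 leaves newpri / RQ_PPQ at 32, so only td_priority is updated
 * and the KSE stays put, while a change to 120 moves it to queue 30 via
 * sched_rem()/sched_add().
 */
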
/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot.  It determines if a thread from the same ksegrp
 * should be preempted.  If so, it tries to switch threads
 * if the thread is on the same cpu, or notifies another cpu that
 * it should switch threads.
 */
static void
maybe_preempt_in_ksegrp(struct thread *td)
{
	struct thread *running_thread;
#if defined(SMP)
	int worst_pri;
	struct ksegrp *kg;
	cpumask_t cpumask, dontuse;
	struct pcpu *pc;
	struct pcpu *best_pcpu;
	struct thread *cputhread;
#endif

#ifndef FULL_PREEMPTION
	int pri;

	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif

	mtx_assert(&sched_lock, MA_OWNED);

	running_thread = curthread;

#if defined(SMP)
#if !defined(KSEG_PEEMPT_BEST_CPU)
	if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
		kg = td->td_ksegrp;

		/* if someone is ahead of this thread, wait our turn */
		if (td != TAILQ_FIRST(&kg->kg_runq))
			return;

		worst_pri = td->td_priority;
		best_pcpu = NULL;
		dontuse = stopped_cpus | idle_cpus_mask;

		/*
		 * Find the cpu with the worst (numerically highest) priority
		 * that is running a thread from the same ksegrp; if several
		 * tie, prefer the cpu this thread last ran on, then the
		 * current cpu.
		 */
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			cpumask = pc->pc_cpumask;
			cputhread = pc->pc_curthread;

			if ((cpumask & dontuse) ||
			    cputhread->td_ksegrp != kg)
				continue;

			if (cputhread->td_priority > worst_pri) {
				worst_pri = cputhread->td_priority;
				best_pcpu = pc;
				continue;
			}

			if (cputhread->td_priority == worst_pri &&
			    best_pcpu != NULL &&
			    (td->td_lastcpu == pc->pc_cpuid ||
			    (PCPU_GET(cpumask) == cpumask &&
			    td->td_lastcpu != best_pcpu->pc_cpuid)))
				best_pcpu = pc;
		}

		/* Check if we need to preempt someone */
		if (best_pcpu == NULL)
			return;

		if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
			best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
			ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
			return;
		}
#if !defined(KSEG_PEEMPT_BEST_CPU)
	}
#endif
#else
	KASSERT(running_thread->td_ksegrp == td->td_ksegrp,
	    ("maybe_preempt_in_ksegrp: No chance to run thread"));
#endif

	if (td->td_priority > running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_pflags |= TDP_OWEPREEMPT;
	else
		mi_switch(SW_INVOL, NULL);
#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}

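/*
 * Illustrative SMP case for the selection above: if two other CPUs are
 * running threads from the same ksegrp at priorities 140 and 160 and the
 * new thread has priority 120, the CPU running the priority 160 thread
 * becomes best_pcpu.  If that is not the current CPU it is only marked
 * TDF_NEEDRESCHED and sent an IPI_AST rather than switched directly.
 */
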
int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has reduced, and we would go in the
	 * assigned section, then keep removing entries from the
	 * system run queue, until we are not in that section
	 * or there is room for us to be put in that section.
	 * What we MUST avoid is the case where there are threads of lower
	 * priority than the new one scheduled, but it can not
	 * be scheduled itself.  That would lead to a non-contiguous set
	 * of scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow
	 * even if the available slot count is negative, so that
	 * all the items < last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the slot.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
		if ((flags & SRQ_YIELDING) == 0)
			maybe_preempt_in_ksegrp(td);
	}
}

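/*
 * Example of the commandeering path above (numbers invented): with
 * kg_avail_opennings at 0 and kg_last_assigned pointing at a priority 140
 * thread, adding a priority 120 thread first sched_rem()s the 140 thread
 * and backs kg_last_assigned up, which is expected to free that slot;
 * the 120 thread is then queued ahead of it and handed the slot by the
 * kg_avail_opennings > 0 branch.
 */
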
/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}

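/*
 * A minimal usage sketch (the per-CPU work shown is hypothetical):
 *
 *	critical_enter();
 *	... touch per-CPU state that must not be preempted away ...
 *	critical_exit();
 *
 * Sections nest: only the outermost critical_exit(), when td_critnest
 * drops from 1 to 0, performs any deferred TDP_OWEPREEMPT switch and
 * leaves the critical region; inner exits just decrement the count.
 */
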
/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether we should immediately preempt to the new thread.  If
 * so, it switches to it and eventually returns true.  If not, it returns
 * false so that the caller may place the thread on an appropriate run
 * queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed.
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

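/*
 * Worked example for the status-bit helpers above, assuming 32-bit status
 * words (RQB_BPW == 32, RQB_L2BPW == 5): queue index 52 lives in word
 * RQB_WORD(52) == 1 as bit RQB_BIT(52) == 1 << 20.  If that is the only
 * bit set anywhere, runq_findbit() sees RQB_FFS() == 20 for word 1 and
 * returns 20 + (1 << 5) == 52.
 */
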
/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

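/*
 * Tying the run-queue helpers together (illustrative, with the stock
 * RQ_PPQ of 4): a KSE whose thread has td_priority 130 is filed by
 * runq_add() on rq_queues[32]; SRQ_PREEMPTED puts it at the head so a
 * preempted thread runs again before equal-priority peers, otherwise it
 * goes to the tail for round-robin behaviour.  runq_remove() later takes
 * it off queue ke_rqindex (still 32 even if td_priority has since
 * changed) and clears the status bit only when that queue drains.
 */
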
/****** functions that are temporarily here ***********/
#include <vm/uma.h>
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler-specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *)(td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched = ke;
	ke->ke_thread = td;
	ke->ke_oncpu = NOCPU;
	ke->ke_state = KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline", before the ksegrp is attached to the world,
 * and thus doesn't need schedlock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ, "kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ, "kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads exiting without the rest of the process
 * exiting, because it is also called from sched_exit() and we wouldn't
 * want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */