/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued that pointer must be
removed from it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can
prove that the next thread in the ksegrp list will not have a KSE to
assign to it, and hence that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not.. i.e. is it 'earlier'
on the list than that thread or later.. If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...            (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
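
/*
 * Illustrative sketch only (never compiled): a minimal userland model of
 * the invariant described above, using hypothetical types rather than the
 * real ksegrp/kse structures.  Threads are kept sorted by priority; the
 * first "avail" of them are considered assigned to slots, and
 * "last_assigned" is the lowest-priority thread still holding a slot (or
 * NULL).  Here "avail" plays the role of M above; it is not the same
 * counter as kg_avail_opennings, which counts slots not yet handed out.
 */
#if 0
#include <stddef.h>

struct model_thread {
        int                     pri;    /* lower value = higher priority */
        struct model_thread     *next;  /* next (lower priority) runnable thread */
};

struct model_ksegrp {
        struct model_thread     *runq;          /* runnable threads, priority order */
        struct model_thread     *last_assigned; /* last thread holding a slot */
        int                     avail;          /* M: slots not currently running */
};

/* Recompute last_assigned: walk "avail" threads down the sorted queue. */
static void
model_update_last_assigned(struct model_ksegrp *kg)
{
        struct model_thread *td = kg->runq;
        int n;

        kg->last_assigned = NULL;
        for (n = 0; n < kg->avail && td != NULL; n++) {
                kg->last_assigned = td;
                td = td->next;
        }
}
#endif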

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.    *
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
        struct kse *ke;
        struct thread *td;
        struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
        if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
                /* Shutting down, run idlethread on AP's */
                td = PCPU_GET(idlethread);
                ke = td->td_kse;
                CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
                ke->ke_flags |= KEF_DIDRUN;
                TD_SET_RUNNING(td);
                return (td);
        }
#endif

retry:
        ke = sched_choose();
        if (ke) {
                td = ke->ke_thread;
                KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
                kg = ke->ke_ksegrp;
                if (td->td_proc->p_flag & P_HADTHREADS) {
                        if (kg->kg_last_assigned == td) {
                                kg->kg_last_assigned = TAILQ_PREV(td,
                                    threadqueue, td_runq);
                        }
                        TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
                        kg->kg_runnable--;
                }
                CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
                    td, td->td_priority);
        } else {
                /* Simulate runq_choose() having returned the idle thread */
                td = PCPU_GET(idlethread);
                ke = td->td_kse;
                CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
        }
        ke->ke_flags |= KEF_DIDRUN;

        /*
         * If we are in panic, only allow system threads,
         * plus the one we are running in, to be run.
         */
        if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
            (td->td_flags & TDF_INPANIC) == 0)) {
                /* note that it is no longer on the run queue */
                TD_SET_CAN_RUN(td);
                goto retry;
        }

        TD_SET_RUNNING(td);
        return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)
 */
static void
slot_fill(struct ksegrp *kg)
{
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        while (kg->kg_avail_opennings > 0) {
                /*
                 * Find the first unassigned thread
                 */
                if ((td = kg->kg_last_assigned) != NULL)
                        td = TAILQ_NEXT(td, td_runq);
                else
                        td = TAILQ_FIRST(&kg->kg_runq);

                /*
                 * If we found one, send it to the system scheduler.
                 */
                if (td) {
                        kg->kg_last_assigned = td;
                        kg->kg_avail_opennings--;
                        sched_add(td, SRQ_BORING);
                        CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
                } else {
                        /* no threads to use up the slots. quit now */
                        break;
                }
        }
}

#ifdef SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
        struct thread *td2, *td3;
        struct ksegrp *kg;
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
        kg = td->td_ksegrp;
        ke = td->td_kse;
        CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
        TD_SET_CAN_RUN(td);
        /*
         * If it is not a threaded process, take the shortcut.
         */
        if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
                /* remove from sys run queue and free up a slot */
                sched_rem(td);
                kg->kg_avail_opennings++;
                ke->ke_state = KES_THREAD;
                return;
        }
        td3 = TAILQ_PREV(td, threadqueue, td_runq);
        TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
        kg->kg_runnable--;
        if (ke->ke_state == KES_ONRUNQ) {
                /*
                 * This thread has been assigned to the system run queue.
                 * We need to dissociate it and try to assign the
                 * KSE to the next available thread.  Then, we should
                 * see if we need to move the KSE in the run queues.
                 */
                sched_rem(td);
                kg->kg_avail_opennings++;
                ke->ke_state = KES_THREAD;
                td2 = kg->kg_last_assigned;
                KASSERT((td2 != NULL), ("last assigned has wrong value"));
                if (td2 == td)
                        kg->kg_last_assigned = td3;
                /* slot_fill(kg); */ /* will replace it with another */
        }
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
        struct ksegrp *kg;
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

        ke = td->td_kse;
        CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
        /*
         * If it is not a threaded process, take the shortcut.
         */
        if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
                /* We only care about the kse in the run queue. */
                td->td_priority = newpri;
                if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
                        sched_rem(td);
                        sched_add(td, SRQ_BORING);
                }
                return;
        }

        /* It is a threaded process */
        kg = td->td_ksegrp;
        TD_SET_CAN_RUN(td);
        if (ke->ke_state == KES_ONRUNQ) {
                if (kg->kg_last_assigned == td) {
                        kg->kg_last_assigned =
                            TAILQ_PREV(td, threadqueue, td_runq);
                }
                sched_rem(td);
                kg->kg_avail_opennings++;
        }
        TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
        kg->kg_runnable--;
        td->td_priority = newpri;
        setrunqueue(td, SRQ_BORING);
}

int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
        struct ksegrp *kg;
        struct thread *td2;
        struct thread *tda;
        int count;

        CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
            td, td->td_ksegrp, td->td_proc->p_pid);
        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((td->td_inhibitors == 0),
            ("setrunqueue: trying to run inhibited thread"));
        KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
            ("setrunqueue: bad thread state"));
        TD_SET_RUNQ(td);
        kg = td->td_ksegrp;
        if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
                /*
                 * Common path optimisation: Only one of everything
                 * and the KSE is always already attached.
                 * Totally ignore the ksegrp run queue.
                 */
                if (kg->kg_avail_opennings != 1) {
                        if (limitcount < 1) {
                                limitcount++;
                                printf("pid %d: corrected slot count (%d->1)\n",
                                    td->td_proc->p_pid, kg->kg_avail_opennings);
                        }
                        kg->kg_avail_opennings = 1;
                }
                kg->kg_avail_opennings--;
                sched_add(td, flags);
                return;
        }

        tda = kg->kg_last_assigned;
        if ((kg->kg_avail_opennings <= 0) &&
            (tda && (tda->td_priority > td->td_priority))) {
                /*
                 * None free, but there is one we can commandeer.
                 */
                CTR2(KTR_RUNQ,
                    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
                sched_rem(tda);
                tda = kg->kg_last_assigned =
                    TAILQ_PREV(tda, threadqueue, td_runq);
                kg->kg_avail_opennings++;
        }

        /*
         * Add the thread to the ksegrp's run queue at
         * the appropriate place.
         */
        count = 0;
        TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
                if (td2->td_priority > td->td_priority) {
                        kg->kg_runnable++;
                        TAILQ_INSERT_BEFORE(td2, td, td_runq);
                        break;
                }
                /* XXX Debugging hack */
                if (++count > 10000) {
                        printf("setrunqueue(): corrupt kg_runq, td= %p\n", td);
                        panic("deadlock in setrunqueue");
                }
        }
        if (td2 == NULL) {
                /* We ran off the end of the TAILQ or it was empty. */
                kg->kg_runnable++;
                TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
        }

        /*
         * If we have a slot to use, then put the thread on the system
         * run queue and if needed, readjust the last_assigned pointer.
         */
        if (kg->kg_avail_opennings > 0) {
                if (tda == NULL) {
                        /*
                         * No pre-existing last assigned so whoever is first
                         * gets the KSE we brought in.. (maybe us)
                         */
                        td2 = TAILQ_FIRST(&kg->kg_runq);
                        kg->kg_last_assigned = td2;
                } else if (tda->td_priority > td->td_priority) {
                        td2 = td;
                } else {
                        /*
                         * We are past last_assigned, so
                         * give the next slot to whatever is next,
                         * which may or may not be us.
                         */
                        td2 = TAILQ_NEXT(tda, td_runq);
                        kg->kg_last_assigned = td2;
                }
                kg->kg_avail_opennings--;
                sched_add(td2, flags);
        } else {
                CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
                    td, td->td_ksegrp, td->td_proc->p_pid);
        }
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
        struct thread *td;

        td = curthread;
        if (td->td_critnest == 0)
                cpu_critical_enter(td);
        td->td_critnest++;
}

void
critical_exit(void)
{
        struct thread *td;

        td = curthread;
        KASSERT(td->td_critnest != 0,
            ("critical_exit: td_critnest == 0"));
        if (td->td_critnest == 1) {
#ifdef PREEMPTION
                mtx_assert(&sched_lock, MA_NOTOWNED);
                if (td->td_pflags & TDP_OWEPREEMPT) {
                        mtx_lock_spin(&sched_lock);
                        mi_switch(SW_INVOL, NULL);
                        mtx_unlock_spin(&sched_lock);
                }
#endif
                td->td_critnest = 0;
                cpu_critical_exit(td);
        } else {
                td->td_critnest--;
        }
}
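
/*
 * Usage sketch only, not compiled: critical_enter()/critical_exit() nest,
 * and a preemption deferred via TDP_OWEPREEMPT is only acted on when the
 * outermost section is exited.  "do_percpu_work()" is a hypothetical
 * stand-in for whatever must not be interleaved with a context switch.
 */
#if 0
static void
example_no_preempt_section(void)
{
        critical_enter();       /* outer section: disables preemption */
        critical_enter();       /* nested: only bumps td_critnest */
        do_percpu_work();
        critical_exit();        /* still inside the outer section */
        critical_exit();        /* may mi_switch() here if one is owed */
}
#endif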

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether we should immediately preempt to the new thread.  If
 * so, it switches to it and eventually returns true.  If not, it returns
 * false so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
        struct thread *ctd;
        int cpri, pri;
#endif

        mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
        /*
         * The new thread should not preempt the current thread if any of the
         * following conditions are true:
         *
         *  - The current thread has a higher (numerically lower) or
         *    equivalent priority.  Note that this prevents curthread from
         *    trying to preempt to itself.
         *  - It is too early in the boot for context switches (cold is set).
         *  - The current thread has an inhibitor set or is in the process of
         *    exiting.  In this case, the current thread is about to switch
         *    out anyways, so there's no point in preempting.  If we did,
         *    the current thread would not be properly resumed as well, so
         *    just avoid that whole landmine.
         *  - If the new thread's priority is not a realtime priority and
         *    the current thread's priority is not an idle priority and
         *    FULL_PREEMPTION is disabled.
         *
         * If all of these conditions are false, but the current thread is in
         * a nested critical section, then we have to defer the preemption
         * until we exit the critical section.  Otherwise, switch immediately
         * to the new thread.
         */
        ctd = curthread;
        KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
            ("thread has no (or wrong) sched-private part."));
        KASSERT((td->td_inhibitors == 0),
            ("maybe_preempt: trying to run inhibited thread"));
        pri = td->td_priority;
        cpri = ctd->td_priority;
        if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
            td->td_kse->ke_state != KES_THREAD)
                return (0);
#ifndef FULL_PREEMPTION
        if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
            !(cpri >= PRI_MIN_IDLE))
                return (0);
#endif
        if (ctd->td_critnest > 1) {
                CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
                    ctd->td_critnest);
                ctd->td_pflags |= TDP_OWEPREEMPT;
                return (0);
        }

        /*
         * Our thread state says that we are already on a run queue, so
         * update our state as if we had been dequeued by choosethread().
         * However we must not actually be on the system run queue yet.
         */
        MPASS(TD_ON_RUNQ(td));
        MPASS(td->td_sched->ke_state != KES_ONRUNQ);
        if (td->td_proc->p_flag & P_HADTHREADS) {
                /*
                 * If this is a threaded process we actually ARE on the
                 * ksegrp run queue so take it off that first.
                 */
                remrunqueue(td);        /* maybe use a simpler version */
        }

        TD_SET_RUNNING(td);
        CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
            td->td_proc->p_pid, td->td_proc->p_comm);
        mi_switch(SW_INVOL, td);
        return (1);
#else
        return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
        printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests                             *
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
        int i;

        bzero(rq, sizeof *rq);
        for (i = 0; i < RQ_NQS; i++)
                TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
        struct rqbits *rqb;

        rqb = &rq->rq_status;
        CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
            rqb->rqb_bits[RQB_WORD(pri)],
            rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
            RQB_BIT(pri), RQB_WORD(pri));
        rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
        struct rqbits *rqb;
        int pri;
        int i;

        rqb = &rq->rq_status;
        for (i = 0; i < RQB_LEN; i++)
                if (rqb->rqb_bits[i]) {
                        pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
                        CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
                            rqb->rqb_bits[i], i, pri);
                        return (pri);
                }

        return (-1);
}
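
/*
 * Worked example of the status-bit bookkeeping (values assumed for
 * illustration: RQ_PPQ == 4, RQB_BPW == 32, hence RQB_L2BPW == 5, and
 * RQB_FFS() yielding the zero-based index of the lowest set bit):
 *
 *      td_priority 100  ->  queue index pri = 100 / RQ_PPQ = 25
 *      RQB_WORD(25) = 25 >> RQB_L2BPW = 0,  RQB_BIT(25) = 1 << (25 & 31)
 *
 * runq_setbit() ORs that bit into status word 0; runq_findbit() later
 * recovers the index as RQB_FFS(word 0) + (0 << RQB_L2BPW) = 25, provided
 * no lower-index (higher priority) queue has its bit set.
 */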

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
        struct rqbits *rqb;

        rqb = &rq->rq_status;
        CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
            rqb->rqb_bits[RQB_WORD(pri)],
            rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
            RQB_BIT(pri), RQB_WORD(pri));
        rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
        struct rqhead *rqh;
        int pri;

        pri = ke->ke_thread->td_priority / RQ_PPQ;
        ke->ke_rqindex = pri;
        runq_setbit(rq, pri);
        rqh = &rq->rq_queues[pri];
        CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
            ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
        TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
        struct rqbits *rqb;
        int i;

        rqb = &rq->rq_status;
        for (i = 0; i < RQB_LEN; i++)
                if (rqb->rqb_bits[i]) {
                        CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
                            rqb->rqb_bits[i], i);
                        return (1);
                }
        CTR0(KTR_RUNQ, "runq_check: empty");

        return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
        struct rqhead *rqh;
        struct kse *ke;
        int pri;

        mtx_assert(&sched_lock, MA_OWNED);
        while ((pri = runq_findbit(rq)) != -1) {
                rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
                /* fuzz == 1 is normal.. 0 or less are ignored */
                if (runq_fuzz > 1) {
                        /*
                         * In the first couple of entries, check if
                         * there is one for our CPU as a preference.
                         */
                        int count = runq_fuzz;
                        int cpu = PCPU_GET(cpuid);
                        struct kse *ke2;
                        ke2 = ke = TAILQ_FIRST(rqh);

                        while (count-- && ke2) {
                                if (ke2->ke_thread->td_lastcpu == cpu) {
                                        ke = ke2;
                                        break;
                                }
                                ke2 = TAILQ_NEXT(ke2, ke_procq);
                        }
                } else
#endif
                        ke = TAILQ_FIRST(rqh);
                KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
                CTR3(KTR_RUNQ,
                    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
                return (ke);
        }
        CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

        return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
        struct rqhead *rqh;
        int pri;

        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("runq_remove: process swapped out"));
        pri = ke->ke_rqindex;
        rqh = &rq->rq_queues[pri];
        CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
            ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
        KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
        TAILQ_REMOVE(rqh, ke, ke_procq);
        if (TAILQ_EMPTY(rqh)) {
                CTR0(KTR_RUNQ, "runq_remove: empty");
                runq_clrbit(rq, pri);
        }
}
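
/*
 * Summary of the run queue primitives above: runq_init() clears the queues
 * and status bits, runq_add() enqueues a KSE at td_priority / RQ_PPQ and
 * sets the matching bit, runq_check() tests whether any bit is set,
 * runq_choose() returns (without dequeueing) the first KSE of the
 * highest-priority non-empty queue, and runq_remove() dequeues a KSE and
 * clears the bit once its queue drains.
 */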

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

        /* This can go in sched_fork */
        sched_init_concurrency(kg);
}

/*
 * Called by the uma process fini routine..
 * undo anything we may have done in the uma_init method.
 * Panic if it's not all 1:1:1:1
 * Called from:
 *  proc_fini() (UMA method)
 */
void
sched_destroyproc(struct proc *p)
{

        /* this function slated for destruction */
        KASSERT((p->p_numthreads == 1), ("Cached proc with > 1 thread "));
        KASSERT((p->p_numksegrps == 1), ("Cached proc with > 1 ksegrp "));
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
        struct td_sched *ke;

        ke = (struct td_sched *)(td + 1);
        bzero(ke, sizeof(*ke));
        td->td_sched = ke;
        ke->ke_thread = td;
        ke->ke_oncpu = NOCPU;
        ke->ke_state = KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline"..before the ksegrp is attached to the world
 * and thus wouldn't need schedlock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

        kg->kg_concurrency = 1;
        kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

        /* Handle the case for a declining concurrency */
        kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
        kg->kg_concurrency = concurrency;
}
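
/*
 * Illustrative note: kg_avail_opennings tracks kg_concurrency minus the
 * number of slots currently handed out, so sched_set_concurrency() only
 * needs to apply the delta.  For example (hypothetical values), with
 * kg_concurrency = 3 and two slots in use, kg_avail_opennings is 1;
 * dropping the concurrency to 2 adds (2 - 3) = -1, leaving 0 openings
 * until a slot is returned (e.g. by sched_thread_exit() below).
 */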

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads that exit while the rest of the process keeps
 * running; sched_exit() also calls it, and we wouldn't want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

        td->td_ksegrp->kg_avail_opennings++;
        slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */