/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/***

Here is the logic..

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned, we know M was 1 and must
now be 0.  Since the thread is no longer queued, that pointer must be
removed from it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not, i.e. is it 'earlier'
on the list than that thread, or later?  If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available, and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.

*/
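
/*
 * A concrete walk-through of the scheme above, using the 2-KSE, 3-thread
 * picture from the diagram.  The thread names and priorities are invented
 * for the example; the behaviour described is that of setrunqueue(),
 * remrunqueue(), kse_reassign() and choosethread() below.
 *
 * Threads T1, T2 and T3 are queued off the KSEGROUP in priority order
 * (T1 best).  With neither KSE running, M = 2, so T1 and T2 are
 * preassigned the two KSEs, those KSEs sit on the system run queue at
 * T1's and T2's priorities, and kg_last_assigned points at T2.  T3 waits
 * with no KSE.  Starting from that state:
 *
 * - Suppose a new unbound thread with a priority between T1's and T2's is
 *   passed to setrunqueue().  There is no idle KSE, but last_assigned
 *   (T2) has a worse priority, so T2's KSE is commandeered, the new
 *   thread is inserted between T1 and T2, and kg_last_assigned ends up
 *   pointing at the new thread.  T2 now waits without a KSE, like T3.
 *
 * - Suppose instead that T2 is taken off the queue by remrunqueue().  Its
 *   KSE is handed to T3 (the next unassigned thread), kg_last_assigned
 *   moves down to T3, and runq_readjust() repositions the KSE on the run
 *   queue at T3's priority.
 *
 * - As choosethread() dequeues the KSEs in priority order, the chosen
 *   threads leave the ksegrp queue too.  When the thread being chosen is
 *   the one kg_last_assigned points at, the pointer is backed up; once
 *   every assigned thread has been chosen it becomes NULL, and stays NULL
 *   until kse_reassign() or setrunqueue() hands a KSE to a waiting thread.
 */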

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <machine/critical.h>

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/*
 * Global run queue.
 */
static struct runq runq;
SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)

static void runq_readjust(struct runq *rq, struct kse *ke);
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.    *
 ************************************************************************/

/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
        struct kse *ke;
        struct thread *td;
        struct ksegrp *kg;

retry:
        if ((ke = runq_choose(&runq))) {
                td = ke->ke_thread;
                KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
                kg = ke->ke_ksegrp;
                if (td->td_flags & TDF_UNBOUND) {
                        TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
                        if (kg->kg_last_assigned == td) {
                                if (TAILQ_PREV(td, threadqueue, td_runq)
                                    != NULL)
                                        printf("Yo MAMA!\n");
                                kg->kg_last_assigned = TAILQ_PREV(td,
                                    threadqueue, td_runq);
                        }
                        /*
                         * If we have started running an upcall,
                         * then TDF_UNBOUND WAS set because the thread was
                         * created without a KSE.  Now that we have one,
                         * and it is our time to run, we make sure
                         * that BOUND semantics apply for the rest of
                         * the journey to userland, and into the UTS.
                         */
#ifdef NOTYET
                        if (td->td_flags & TDF_UPCALLING)
                                td->td_flags &= ~TDF_UNBOUND;
#endif
                }
                kg->kg_runnable--;
                CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
                    td, td->td_priority);
        } else {
                /* Simulate runq_choose() having returned the idle thread */
                td = PCPU_GET(idlethread);
                ke = td->td_kse;
                CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
        }
        ke->ke_flags |= KEF_DIDRUN;
        if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
            (td->td_flags & TDF_INPANIC) == 0))
                goto retry;
        TD_SET_RUNNING(td);
        return (td);
}

/*
 * Given a KSE (now surplus), either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes the kse is not linked to any threads any more (it has been cleaned).
 */
void
kse_reassign(struct kse *ke)
{
        struct ksegrp *kg;
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        kg = ke->ke_ksegrp;

        /*
         * Find the first unassigned thread.
         * If there is a 'last assigned' then see what's next;
         * otherwise look at what is first.
         */
        if ((td = kg->kg_last_assigned)) {
                td = TAILQ_NEXT(td, td_runq);
        } else {
                td = TAILQ_FIRST(&kg->kg_runq);
        }

        /*
         * If we found one, assign it the kse; otherwise idle the kse.
         */
        if (td) {
                kg->kg_last_assigned = td;
                td->td_kse = ke;
                ke->ke_thread = td;
                runq_add(&runq, ke);
                CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
        } else {
                ke->ke_state = KES_IDLE;
                ke->ke_thread = NULL;
                TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
                kg->kg_idle_kses++;
                CTR1(KTR_RUNQ, "kse_reassign: ke%p idled", ke);
        }
}
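
/*
 * Illustrative sketch (guarded out, like the debugging code at the end of
 * this file): a minimal userland model of the "next unassigned thread"
 * rule used by kse_reassign() above, built on the same <sys/queue.h>
 * TAILQ macros.  The struct and field names are invented for the example;
 * only the selection rule mirrors the kernel code.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>

struct model_thread {
        int                             mt_pri;         /* smaller is better */
        int                             mt_has_kse;     /* already assigned? */
        TAILQ_ENTRY(model_thread)       mt_runq;
};
TAILQ_HEAD(model_runq, model_thread);

/* Mirror of kse_reassign()'s search: after last_assigned, else the head. */
static struct model_thread *
model_next_unassigned(struct model_runq *q, struct model_thread *last_assigned)
{
        if (last_assigned != NULL)
                return (TAILQ_NEXT(last_assigned, mt_runq));
        return (TAILQ_FIRST(q));
}

int
main(void)
{
        struct model_runq q = TAILQ_HEAD_INITIALIZER(q);
        struct model_thread t1 = { 10, 1 }, t2 = { 20, 1 }, t3 = { 30, 0 };
        struct model_thread *last_assigned, *td;

        TAILQ_INSERT_TAIL(&q, &t1, mt_runq);
        TAILQ_INSERT_TAIL(&q, &t2, mt_runq);
        TAILQ_INSERT_TAIL(&q, &t3, mt_runq);
        last_assigned = &t2;            /* t1 and t2 already own KSEs */

        td = model_next_unassigned(&q, last_assigned);
        printf("next unassigned pri: %d\n", td ? td->mt_pri : -1);  /* 30 */
        return (0);
}
#endif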

/*
 * Return true if there is anything on the system run queue to run.
 */
int
kserunnable(void)
{
        return runq_check(&runq);
}

/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
 */
void
remrunqueue(struct thread *td)
{
        struct thread *td2, *td3;
        struct ksegrp *kg;
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
        kg = td->td_ksegrp;
        ke = td->td_kse;
        /*
         * If it's a bound thread/KSE pair, take the shortcut.  All non-KSE
         * threads are BOUND.
         */
        CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
        kg->kg_runnable--;
        TD_SET_CAN_RUN(td);
        if ((td->td_flags & TDF_UNBOUND) == 0) {
                /* Bring its kse with it, leave the thread attached */
                runq_remove(&runq, ke);
                ke->ke_state = KES_THREAD;
                return;
        }
        if (ke) {
                /*
                 * This thread has been assigned to a KSE.
                 * We need to dissociate it and try to assign the
                 * KSE to the next available thread.  Then, we should
                 * see if we need to move the KSE in the run queues.
                 */
                td2 = kg->kg_last_assigned;
                KASSERT((td2 != NULL), ("last assigned has wrong value"));
                td->td_kse = NULL;
                if ((td3 = TAILQ_NEXT(td2, td_runq))) {
                        KASSERT(td3 != td, ("td3 somehow matched td"));
                        /*
                         * Give the next unassigned thread to the KSE
                         * so the number of runnable KSEs remains
                         * constant.
                         */
                        td3->td_kse = ke;
                        ke->ke_thread = td3;
                        kg->kg_last_assigned = td3;
                        runq_readjust(&runq, ke);
                } else {
                        /*
                         * There is no unassigned thread.
                         * If we were the last assigned one,
                         * adjust the last assigned pointer back
                         * one, which may result in NULL.
                         */
                        if (td == td2) {
                                kg->kg_last_assigned =
                                    TAILQ_PREV(td, threadqueue, td_runq);
                        }
                        runq_remove(&runq, ke);
                        KASSERT((ke->ke_state != KES_IDLE),
                            ("kse already idle"));
                        ke->ke_state = KES_IDLE;
                        ke->ke_thread = NULL;
                        TAILQ_INSERT_HEAD(&kg->kg_iq, ke, ke_kgrlist);
                        kg->kg_idle_kses++;
                }
        }
        TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
}

/*
 * Make a thread runnable.  A bound thread just takes its own KSE to the
 * system run queue.  An unbound thread is queued on its ksegrp in priority
 * order and, if a KSE is free (or one can be commandeered from a lower
 * priority thread), that KSE is handed to the first unassigned thread and
 * placed on the system run queue.
 */
void
setrunqueue(struct thread *td)
{
        struct kse *ke;
        struct ksegrp *kg;
        struct thread *td2;
        struct thread *tda;

        CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
            ("setrunqueue: bad thread state"));
        TD_SET_RUNQ(td);
        kg = td->td_ksegrp;
        kg->kg_runnable++;
        if ((td->td_flags & TDF_UNBOUND) == 0) {
                KASSERT((td->td_kse != NULL),
                    ("queueing BAD thread to run queue"));
                /*
                 * Common path optimisation: Only one of everything
                 * and the KSE is always already attached.
                 * Totally ignore the ksegrp run queue.
                 */
                runq_add(&runq, td->td_kse);
                return;
        }
        /*
         * Ok, so we are threading with this thread.
         * We don't have a KSE, see if we can get one..
         */
        tda = kg->kg_last_assigned;
        if ((ke = td->td_kse) == NULL) {
                /*
                 * We will need a KSE: see if there is one..
                 * First look for a free one, before getting desperate.
                 * If we can't get one, our priority is not high enough..
                 * that's ok..
                 */
                if (kg->kg_idle_kses) {
                        /*
                         * There is a free one so it's ours for the asking..
                         */
                        ke = TAILQ_FIRST(&kg->kg_iq);
                        TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
                        ke->ke_state = KES_THREAD;
                        kg->kg_idle_kses--;
                } else if (tda && (tda->td_priority > td->td_priority)) {
                        /*
                         * None free, but there is one we can commandeer.
                         */
                        ke = tda->td_kse;
                        tda->td_kse = NULL;
                        ke->ke_thread = NULL;
                        tda = kg->kg_last_assigned =
                            TAILQ_PREV(tda, threadqueue, td_runq);
                        runq_remove(&runq, ke);
                }
        } else {
                /*
                 * Temporarily disassociate so it looks like the other cases.
                 */
                ke->ke_thread = NULL;
                td->td_kse = NULL;
        }

        /*
         * Add the thread to the ksegrp's run queue at
         * the appropriate place.
         */
        TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
                if (td2->td_priority > td->td_priority) {
                        TAILQ_INSERT_BEFORE(td2, td, td_runq);
                        break;
                }
        }
        if (td2 == NULL) {
                /* We ran off the end of the TAILQ or it was empty. */
                TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
        }

        /*
         * If we have a ke to use, then put it on the run queue and,
         * if needed, readjust the last_assigned pointer.
         */
        if (ke) {
                if (tda == NULL) {
                        /*
                         * No pre-existing last assigned, so whoever is first
                         * gets the KSE we brought in.. (maybe us)
                         */
                        td2 = TAILQ_FIRST(&kg->kg_runq);
                        KASSERT((td2->td_kse == NULL),
                            ("unexpected ke present"));
                        td2->td_kse = ke;
                        ke->ke_thread = td2;
                        kg->kg_last_assigned = td2;
                } else if (tda->td_priority > td->td_priority) {
                        /*
                         * It's ours, grab it, but last_assigned is past us
                         * so don't change it.
                         */
                        td->td_kse = ke;
                        ke->ke_thread = td;
                } else {
                        /*
                         * We are past last_assigned, so
                         * put the new kse on whatever is next,
                         * which may or may not be us.
                         */
                        td2 = TAILQ_NEXT(tda, td_runq);
                        kg->kg_last_assigned = td2;
                        td2->td_kse = ke;
                        ke->ke_thread = td2;
                }
                runq_add(&runq, ke);
        }
}

/************************************************************************
 * Critical section marker functions                                   *
 ************************************************************************/
/* Critical sections that prevent preemption. */
void
critical_enter(void)
{
        struct thread *td;

        td = curthread;
        if (td->td_critnest == 0)
                cpu_critical_enter();
        td->td_critnest++;
}

void
critical_exit(void)
{
        struct thread *td;

        td = curthread;
        if (td->td_critnest == 1) {
                td->td_critnest = 0;
                cpu_critical_exit();
        } else {
                td->td_critnest--;
        }
}


/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests                             *
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
        int i;

        bzero(rq, sizeof *rq);
        for (i = 0; i < RQ_NQS; i++)
                TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
        struct rqbits *rqb;

        rqb = &rq->rq_status;
        CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
            rqb->rqb_bits[RQB_WORD(pri)],
            rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
            RQB_BIT(pri), RQB_WORD(pri));
        rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
        struct rqbits *rqb;
        int pri;
        int i;

        rqb = &rq->rq_status;
        for (i = 0; i < RQB_LEN; i++)
                if (rqb->rqb_bits[i]) {
                        pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
                        CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
                            rqb->rqb_bits[i], i, pri);
                        return (pri);
                }

        return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
        struct rqbits *rqb;

        rqb = &rq->rq_status;
        CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
            rqb->rqb_bits[RQB_WORD(pri)],
            rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
            RQB_BIT(pri), RQB_WORD(pri));
        rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}
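
/*
 * Illustrative sketch (guarded out, like the debugging code at the end of
 * this file): a minimal userland model of the status-bit arithmetic used
 * by runq_setbit(), runq_clrbit() and runq_findbit() above, and of the
 * priority-to-queue mapping done by runq_add() below.  The MODEL_ names
 * are invented for the example and assume the conventional layout of 64
 * queues, 4 priority levels per queue and 32-bit status words; the kernel
 * uses the real RQB_ and RQ_ macros from the headers instead.
 */
#if 0
#include <stdio.h>
#include <strings.h>                            /* ffs() */

#define MODEL_NQS       64                      /* number of run queues */
#define MODEL_PPQ       4                       /* priority levels per queue */
#define MODEL_BPW       32                      /* bits per status word */
#define MODEL_L2BPW     5                       /* log2(MODEL_BPW) */
#define MODEL_LEN       (MODEL_NQS / MODEL_BPW) /* status words needed */
#define MODEL_WORD(i)   ((i) >> MODEL_L2BPW)
#define MODEL_BIT(i)    (1u << ((i) & (MODEL_BPW - 1)))

static unsigned int model_bits[MODEL_LEN];

int
main(void)
{
        int i, idx, pri;

        /*
         * A thread priority of 130 lands in queue 130 / 4 = 32, as in
         * runq_add(); mark that queue (and queue 50) non-empty the way
         * runq_setbit() would.
         */
        idx = 130 / MODEL_PPQ;
        model_bits[MODEL_WORD(idx)] |= MODEL_BIT(idx);
        model_bits[MODEL_WORD(50)] |= MODEL_BIT(50);

        /* Scan for the lowest non-empty queue, as runq_findbit() does. */
        for (i = 0; i < MODEL_LEN; i++)
                if (model_bits[i]) {
                        pri = (ffs(model_bits[i]) - 1) + (i << MODEL_L2BPW);
                        printf("first non-empty queue: %d\n", pri);  /* 32 */
                        break;
                }
        return (0);
}
#endif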

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
        struct rqhead *rqh;
        int pri;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
        KASSERT((ke->ke_thread->td_kse != NULL),
            ("runq_add: No KSE on thread"));
        KASSERT(ke->ke_state != KES_ONRUNQ,
            ("runq_add: kse %p (%s) already in run queue", ke,
            ke->ke_proc->p_comm));
        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("runq_add: process swapped out"));
        pri = ke->ke_thread->td_priority / RQ_PPQ;
        ke->ke_rqindex = pri;
        runq_setbit(rq, pri);
        rqh = &rq->rq_queues[pri];
        CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
            ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
        TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
        ke->ke_ksegrp->kg_runq_kses++;
        ke->ke_state = KES_ONRUNQ;
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
        struct rqbits *rqb;
        int i;

        rqb = &rq->rq_status;
        for (i = 0; i < RQB_LEN; i++)
                if (rqb->rqb_bits[i]) {
                        CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
                            rqb->rqb_bits[i], i);
                        return (1);
                }
        CTR0(KTR_RUNQ, "runq_check: empty");

        return (0);
}

/*
 * Find and remove the highest priority process from the run queue.
 * If there are no runnable processes, NULL is returned and the caller
 * (see choosethread() above) substitutes the per-cpu idle thread.
 */
struct kse *
runq_choose(struct runq *rq)
{
        struct rqhead *rqh;
        struct kse *ke;
        int pri;

        mtx_assert(&sched_lock, MA_OWNED);
        while ((pri = runq_findbit(rq)) != -1) {
                rqh = &rq->rq_queues[pri];
                ke = TAILQ_FIRST(rqh);
                KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
                CTR3(KTR_RUNQ,
                    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
                TAILQ_REMOVE(rqh, ke, ke_procq);
                ke->ke_ksegrp->kg_runq_kses--;
                if (TAILQ_EMPTY(rqh)) {
                        CTR0(KTR_RUNQ, "runq_choose: empty");
                        runq_clrbit(rq, pri);
                }

                ke->ke_state = KES_THREAD;
                KASSERT((ke->ke_thread != NULL),
                    ("runq_choose: No thread on KSE"));
                KASSERT((ke->ke_thread->td_kse != NULL),
                    ("runq_choose: No KSE on thread"));
                KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
                    ("runq_choose: process swapped out"));
                return (ke);
        }
        CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

        return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
        struct rqhead *rqh;
        int pri;

        KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("runq_remove: process swapped out"));
        pri = ke->ke_rqindex;
        rqh = &rq->rq_queues[pri];
        CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
            ke, ke->ke_thread->td_priority, pri, rqh);
        KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
        TAILQ_REMOVE(rqh, ke, ke_procq);
        if (TAILQ_EMPTY(rqh)) {
                CTR0(KTR_RUNQ, "runq_remove: empty");
                runq_clrbit(rq, pri);
        }
        ke->ke_state = KES_THREAD;
        ke->ke_ksegrp->kg_runq_kses--;
}

/*
 * If the KSE's queue index no longer matches its thread's priority,
 * move it to the right queue.
 */
static void
runq_readjust(struct runq *rq, struct kse *ke)
{

        if (ke->ke_rqindex != (ke->ke_thread->td_priority / RQ_PPQ)) {
                runq_remove(rq, ke);
                runq_add(rq, ke);
        }
}

#if 0
void
thread_sanity_check(struct thread *td)
{
        struct proc *p;
        struct ksegrp *kg;
        struct kse *ke;
        struct thread *td2;
        unsigned int prevpri;
        int saw_lastassigned;
        int unassigned;
        int assigned;

        p = td->td_proc;
        kg = td->td_ksegrp;
        ke = td->td_kse;

        if (ke) {
                if (p != ke->ke_proc) {
                        panic("wrong proc");
                }
                if (ke->ke_thread != td) {
                        panic("wrong thread");
                }
        }

        if ((p->p_flag & P_KSES) == 0) {
                if (ke == NULL) {
                        panic("non KSE thread lost kse");
                }
        } else {
                prevpri = 0;
                saw_lastassigned = 0;
                unassigned = 0;
                assigned = 0;
                TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
                        if (td2->td_priority < prevpri) {
                                panic("thread runqueue unsorted");
                        }
                        prevpri = td2->td_priority;
                        if (td2->td_kse) {
                                assigned++;
                                if (unassigned) {
                                        panic("unassigned before assigned");
                                }
                                if (kg->kg_last_assigned == NULL) {
                                        panic("lastassigned corrupt");
                                }
                                if (saw_lastassigned) {
                                        panic("last assigned not last");
                                }
                                if (td2->td_kse->ke_thread != td2) {
                                        panic("mismatched kse/thread");
                                }
                        } else {
                                unassigned++;
                        }
                        if (td2 == kg->kg_last_assigned) {
                                saw_lastassigned = 1;
                                if (td2->td_kse == NULL) {
                                        panic("last assigned not assigned");
                                }
                        }
                }
                if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
                        panic("where on earth does lastassigned point?");
                }
                FOREACH_THREAD_IN_GROUP(kg, td2) {
                        if (((td2->td_flags & TDF_UNBOUND) == 0) &&
                            (TD_ON_RUNQ(td2))) {
                                assigned++;
                                if (td2->td_kse == NULL) {
                                        panic("BOUND thread with no KSE");
                                }
                        }
                }
#if 0
                if ((unassigned + assigned) != kg->kg_runnable) {
                        panic("wrong number in runnable");
                }
#endif
        }
}
#endif