/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/***

Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M = N - X of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue in order to run, we know
it was associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned we know M was 1 and must
now be 0.  Since the thread is no longer queued that pointer must be
removed from it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can
prove that the next thread in the ksegrp list will not have a KSE to assign
to it, and we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not, i.e. whether it is 'earlier'
on the list than that thread or later.  If it's earlier, the KSE is
removed from the last assigned thread (which is then no longer assigned
a KSE) and reassigned to the new thread, which is placed earlier in the
list.  The pointer is then backed up to the previous thread (which may or
may not be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.

*/
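
/*
 * Illustrative sketch (not kernel code): a minimal userland model of the
 * 'steal' test described above.  All names and types here are hypothetical
 * simplifications, not the real kernel structures.  A newly runnable
 * thread takes the KSE of the last assigned thread only when it sorts
 * earlier in the priority-ordered list, i.e. when its priority value is
 * numerically lower.
 */
#if 0
#include <stdio.h>

struct steal_thread {
	int	pri;		/* lower value = higher priority */
};

/*
 * Return 1 when 'newtd' should commandeer the KSE currently held by
 * 'last_assigned'; mirrors the tda->td_priority > td->td_priority
 * test in setrunqueue() below.
 */
static int
steal_test(struct steal_thread *newtd, struct steal_thread *last_assigned)
{
	if (last_assigned == NULL)
		return (0);	/* nothing is assigned; nothing to steal */
	return (last_assigned->pri > newtd->pri);
}

int
main(void)
{
	struct steal_thread last = { 20 };
	struct steal_thread incoming = { 10 };

	printf("steal: %d\n", steal_test(&incoming, &last));	/* prints 1 */
	return (0);
}
#endif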

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#if defined(SMP) && defined(__i386__)
#include <sys/smp.h>
#endif
#include <machine/critical.h>

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

void panc(char *string1, char *string2);

#if 0
static void runq_readjust(struct runq *rq, struct kse *ke);
#endif
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && defined(__i386__)
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on AP's */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_THREADED) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
		}
		kg->kg_runnable--;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * Only allow non-system threads to run in panic
	 * if they are the one we are tracing.  (I think.. [JRE])
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0))
		goto retry;

	TD_SET_RUNNING(td);
	return (td);
}
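
/*
 * Illustrative sketch (not kernel code): the dequeue step in
 * choosethread() above, modelled in userland with the same <sys/queue.h>
 * macros.  When the head of the priority-ordered list is also the last
 * assigned thread, the pointer must back up to its predecessor (or become
 * NULL when the list held a single entry).  The element type is a
 * hypothetical stand-in.
 */
#if 0
#include <sys/queue.h>
#include <stddef.h>

struct dq_thread {
	TAILQ_ENTRY(dq_thread) link;
};
TAILQ_HEAD(dq_queue, dq_thread);

static struct dq_thread *
dq_dequeue_head(struct dq_queue *q, struct dq_thread **last_assigned)
{
	struct dq_thread *td;

	td = TAILQ_FIRST(q);
	if (td == NULL)
		return (NULL);
	if (*last_assigned == td)
		*last_assigned = TAILQ_PREV(td, dq_queue, link);
	TAILQ_REMOVE(q, td, link);
	return (td);
}
#endif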

/*
 * Given a surplus KSE, either assign a new runnable thread to it
 * (and put it in the run queue) or put it in the ksegrp's idle KSE list.
 * Assumes that the original thread is not runnable.
 */
void
kse_reassign(struct kse *ke)
{
	struct ksegrp *kg;
	struct thread *td;
	struct thread *original;

	mtx_assert(&sched_lock, MA_OWNED);
	original = ke->ke_thread;
	KASSERT(original == NULL || TD_IS_INHIBITED(original),
	    ("reassigning KSE with runnable thread"));
	kg = ke->ke_ksegrp;
	if (original)
		original->td_kse = NULL;

	/*
	 * Find the first unassigned thread
	 */
	if ((td = kg->kg_last_assigned) != NULL)
		td = TAILQ_NEXT(td, td_runq);
	else
		td = TAILQ_FIRST(&kg->kg_runq);

	/*
	 * If we found one, assign it the kse, otherwise idle the kse.
	 */
	if (td) {
		kg->kg_last_assigned = td;
		td->td_kse = ke;
		ke->ke_thread = td;
		sched_add(ke);
		CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p", ke, td);
		return;
	}

	ke->ke_state = KES_IDLE;
	ke->ke_thread = NULL;
	TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
	kg->kg_idle_kses++;
	CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
	return;
}

#if 0
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_THREADED) == 0) {
		/* Bring its kse with it, leave the thread attached */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke) {
		/*
		 * This thread has been assigned to a KSE.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(ke);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		kse_reassign(ke);
	}
}
#endif
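
/*
 * Illustrative sketch (not kernel code): the priority-to-queue mapping
 * used by adjustrunqueue() below.  Threads whose priorities fall in the
 * same RQ_PPQ-wide band share a run queue, so a priority change within
 * one band needs no requeue.  The value 4 for RQ_PPQ is an assumption
 * for the sketch.
 */
#if 0
#include <stdio.h>

#define	SK_RQ_PPQ	4	/* priorities per queue (assumed) */

int
main(void)
{
	int oldpri = 130, newpri;

	for (newpri = 128; newpri <= 133; newpri++)
		printf("pri %d -> queue %d, requeue: %s\n", newpri,
		    newpri / SK_RQ_PPQ,
		    (newpri / SK_RQ_PPQ != oldpri / SK_RQ_PPQ) ?
		    "yes" : "no");
	return (0);
}
#endif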

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_THREADED) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(ke);
			sched_add(ke);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	if (ke) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(ke);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	td->td_priority = newpri;
	setrunqueue(td);
}

void
setrunqueue(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR1(KTR_RUNQ, "setrunqueue: td%p", td);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	kg->kg_runnable++;
	if ((td->td_proc->p_flag & P_THREADED) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		sched_add(td->td_kse);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((ke = td->td_kse) == NULL) {
		if (kg->kg_idle_kses) {
			/*
			 * There is a free one so it's ours for the asking..
			 */
			ke = TAILQ_FIRST(&kg->kg_iq);
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			ke->ke_state = KES_THREAD;
			kg->kg_idle_kses--;
		} else if (tda && (tda->td_priority > td->td_priority)) {
			/*
			 * None free, but there is one we can commandeer.
			 */
			ke = tda->td_kse;
			tda->td_kse = NULL;
			ke->ke_thread = NULL;
			tda = kg->kg_last_assigned =
			    TAILQ_PREV(tda, threadqueue, td_runq);
			sched_rem(ke);
		}
	} else {
		/*
		 * Temporarily disassociate so it looks like the other cases.
		 */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a KSE to use, then put it on the run queue and,
	 * if needed, readjust the last_assigned pointer.
	 */
	if (ke) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			KASSERT((td2->td_kse == NULL),
			    ("unexpected ke present"));
			td2->td_kse = ke;
			ke->ke_thread = td2;
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			/*
			 * It's ours, grab it, but last_assigned is past us
			 * so don't change it.
			 */
			td->td_kse = ke;
			ke->ke_thread = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * put the new kse on whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
			td2->td_kse = ke;
			ke->ke_thread = td2;
		}
		sched_add(ke);
	}
}
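
/*
 * Illustrative sketch (not kernel code): the priority-ordered insertion
 * used by setrunqueue() above, modelled in userland with <sys/queue.h>.
 * Walking forward and inserting before the first entry with a larger
 * priority value keeps the list sorted; falling off the end means the
 * new entry belongs at the tail.  Toy types only.
 */
#if 0
#include <sys/queue.h>

struct ins_thread {
	int	pri;		/* lower value = higher priority */
	TAILQ_ENTRY(ins_thread) link;
};
TAILQ_HEAD(ins_queue, ins_thread);

static void
ins_insert_sorted(struct ins_queue *q, struct ins_thread *td)
{
	struct ins_thread *td2;

	TAILQ_FOREACH(td2, q, link) {
		if (td2->pri > td->pri) {
			TAILQ_INSERT_BEFORE(td2, td, link);
			return;
		}
	}
	TAILQ_INSERT_TAIL(q, td, link);	/* ran off the end or empty */
}
#endif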

/************************************************************************
 * Critical section marker functions					*
 ************************************************************************/
/*
 * Critical sections that prevent preemption.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter();
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		cpu_critical_exit();
	} else {
		td->td_critnest--;
	}
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: p=%p pri=%d %d rqh=%p",
	    ke->ke_proc, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}
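
/*
 * Illustrative sketch (not kernel code): the status-bitmap scan used by
 * runq_setbit()/runq_findbit()/runq_check() above, in userland form.
 * One bit per queue; ffs() on the first non-zero word yields the
 * lowest-numbered (highest priority) non-empty queue.  The word size
 * and queue count are assumptions for the sketch.
 */
#if 0
#include <strings.h>	/* ffs() */

#define	BM_BPW		32			/* bits per word (assumed) */
#define	BM_NQS		64			/* queue count (assumed) */
#define	BM_LEN		(BM_NQS / BM_BPW)	/* words in the bitmap */
#define	BM_WORD(pri)	((pri) / BM_BPW)
#define	BM_BIT(pri)	(1U << ((pri) % BM_BPW))

static unsigned int bm_bits[BM_LEN];

static void
bm_setbit(int pri)
{
	bm_bits[BM_WORD(pri)] |= BM_BIT(pri);
}

/* Return the lowest set bit number, or -1 if every queue is empty. */
static int
bm_findbit(void)
{
	int i;

	for (i = 0; i < BM_LEN; i++)
		if (bm_bits[i] != 0)
			return (ffs(bm_bits[i]) - 1 + i * BM_BPW);
	return (-1);
}
#endif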

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove: p=%p pri=%d %d rqh=%p",
	    ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}
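
/*
 * Illustrative sketch (not kernel code): the teardown half of the status
 * bitmap, as in runq_remove() above.  A queue's bit is cleared only when
 * its last element leaves, so the bitmap stays an exact summary of which
 * queues are non-empty.  Names and sizes are assumptions for the sketch.
 */
#if 0
#define	RM_BPW		32			/* bits per word (assumed) */
#define	RM_NQS		64			/* queue count (assumed) */
#define	RM_WORD(q)	((q) / RM_BPW)
#define	RM_BIT(q)	(1U << ((q) % RM_BPW))

static unsigned int rm_bits[RM_NQS / RM_BPW];
static int rm_qlen[RM_NQS];		/* element count per queue */

static void
rm_remove_one(int q)
{
	if (--rm_qlen[q] == 0)
		rm_bits[RM_WORD(q)] &= ~RM_BIT(q);	/* queue now empty */
}
#endif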
printf("%p %p %p %p %p %d, %d", 717 td, td2, ke, kg, p, assigned, saw_lastassigned); 718 } 719 } 720 #endif 721 722