1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 27 /* All Rights Reserved */ 28 29 30 #include <sys/types.h> 31 #include <sys/param.h> 32 #include <sys/sysmacros.h> 33 #include <sys/signal.h> 34 #include <sys/user.h> 35 #include <sys/systm.h> 36 #include <sys/sysinfo.h> 37 #include <sys/var.h> 38 #include <sys/errno.h> 39 #include <sys/cmn_err.h> 40 #include <sys/debug.h> 41 #include <sys/inline.h> 42 #include <sys/disp.h> 43 #include <sys/class.h> 44 #include <sys/bitmap.h> 45 #include <sys/kmem.h> 46 #include <sys/cpuvar.h> 47 #include <sys/vtrace.h> 48 #include <sys/tnf.h> 49 #include <sys/cpupart.h> 50 #include <sys/lgrp.h> 51 #include <sys/pg.h> 52 #include <sys/cmt.h> 53 #include <sys/bitset.h> 54 #include <sys/schedctl.h> 55 #include <sys/atomic.h> 56 #include <sys/dtrace.h> 57 #include <sys/sdt.h> 58 #include <sys/archsystm.h> 59 60 #include <vm/as.h> 61 62 #define BOUND_CPU 0x1 63 #define BOUND_PARTITION 0x2 64 #define BOUND_INTR 0x4 65 66 /* Dispatch queue allocation structure and functions */ 67 struct disp_queue_info { 68 disp_t *dp; 69 dispq_t *olddispq; 70 dispq_t *newdispq; 71 ulong_t *olddqactmap; 72 ulong_t *newdqactmap; 73 int oldnglobpris; 74 }; 75 static void disp_dq_alloc(struct disp_queue_info *dptr, int numpris, 76 disp_t *dp); 77 static void disp_dq_assign(struct disp_queue_info *dptr, int numpris); 78 static void disp_dq_free(struct disp_queue_info *dptr); 79 80 /* platform-specific routine to call when processor is idle */ 81 static void generic_idle_cpu(); 82 void (*idle_cpu)() = generic_idle_cpu; 83 84 /* routines invoked when a CPU enters/exits the idle loop */ 85 static void idle_enter(); 86 static void idle_exit(); 87 88 /* platform-specific routine to call when thread is enqueued */ 89 static void generic_enq_thread(cpu_t *, int); 90 void (*disp_enq_thread)(cpu_t *, int) = generic_enq_thread; 91 92 pri_t kpreemptpri; /* priority where kernel preemption applies */ 93 pri_t upreemptpri = 0; /* priority where normal preemption applies */ 94 pri_t intr_pri; /* interrupt thread priority base level */ 95 96 #define KPQPRI -1 /* pri where cpu affinity is dropped for kpq */ 97 pri_t kpqpri = KPQPRI; /* can be set in /etc/system */ 98 disp_t cpu0_disp; /* boot CPU's dispatch queue */ 99 disp_lock_t swapped_lock; /* lock swapped threads and swap queue */ 100 int nswapped; /* total number of swapped threads */ 101 void disp_swapped_enq(kthread_t *tp); 102 static void disp_swapped_setrun(kthread_t *tp); 103 static void cpu_resched(cpu_t *cp, pri_t tpri); 104 105 /* 106 * If this is set, only interrupt threads 
will cause kernel preemptions. 107 * This is done by changing the value of kpreemptpri. kpreemptpri 108 * will either be the max sysclass pri + 1 or the min interrupt pri. 109 */ 110 int only_intr_kpreempt; 111 112 extern void set_idle_cpu(int cpun); 113 extern void unset_idle_cpu(int cpun); 114 static void setkpdq(kthread_t *tp, int borf); 115 #define SETKP_BACK 0 116 #define SETKP_FRONT 1 117 /* 118 * Parameter that determines how recently a thread must have run 119 * on the CPU to be considered loosely-bound to that CPU to reduce 120 * cold cache effects. The interval is in hertz. 121 */ 122 #define RECHOOSE_INTERVAL 3 123 int rechoose_interval = RECHOOSE_INTERVAL; 124 125 /* 126 * Parameter that determines how long (in nanoseconds) a thread must 127 * be sitting on a run queue before it can be stolen by another CPU 128 * to reduce migrations. The interval is in nanoseconds. 129 * 130 * The nosteal_nsec should be set by platform code cmp_set_nosteal_interval() 131 * to an appropriate value. nosteal_nsec is set to NOSTEAL_UNINITIALIZED 132 * here indicating it is uninitiallized. 133 * Setting nosteal_nsec to 0 effectively disables the nosteal 'protection'. 134 * 135 */ 136 #define NOSTEAL_UNINITIALIZED (-1) 137 hrtime_t nosteal_nsec = NOSTEAL_UNINITIALIZED; 138 extern void cmp_set_nosteal_interval(void); 139 140 id_t defaultcid; /* system "default" class; see dispadmin(1M) */ 141 142 disp_lock_t transition_lock; /* lock on transitioning threads */ 143 disp_lock_t stop_lock; /* lock on stopped threads */ 144 145 static void cpu_dispqalloc(int numpris); 146 147 /* 148 * This gets returned by disp_getwork/disp_getbest if we couldn't steal 149 * a thread because it was sitting on its run queue for a very short 150 * period of time. 151 */ 152 #define T_DONTSTEAL (kthread_t *)(-1) /* returned by disp_getwork/getbest */ 153 154 static kthread_t *disp_getwork(cpu_t *to); 155 static kthread_t *disp_getbest(disp_t *from); 156 static kthread_t *disp_ratify(kthread_t *tp, disp_t *kpq); 157 158 void swtch_to(kthread_t *); 159 160 /* 161 * dispatcher and scheduler initialization 162 */ 163 164 /* 165 * disp_setup - Common code to calculate and allocate dispatcher 166 * variables and structures based on the maximum priority. 167 */ 168 static void 169 disp_setup(pri_t maxglobpri, pri_t oldnglobpris) 170 { 171 pri_t newnglobpris; 172 173 ASSERT(MUTEX_HELD(&cpu_lock)); 174 175 newnglobpris = maxglobpri + 1 + LOCK_LEVEL; 176 177 if (newnglobpris > oldnglobpris) { 178 /* 179 * Allocate new kp queues for each CPU partition. 180 */ 181 cpupart_kpqalloc(newnglobpris); 182 183 /* 184 * Allocate new dispatch queues for each CPU. 185 */ 186 cpu_dispqalloc(newnglobpris); 187 188 /* 189 * compute new interrupt thread base priority 190 */ 191 intr_pri = maxglobpri; 192 if (only_intr_kpreempt) { 193 kpreemptpri = intr_pri + 1; 194 if (kpqpri == KPQPRI) 195 kpqpri = kpreemptpri; 196 } 197 v.v_nglobpris = newnglobpris; 198 } 199 } 200 201 /* 202 * dispinit - Called to initialize all loaded classes and the 203 * dispatcher framework. 204 */ 205 void 206 dispinit(void) 207 { 208 id_t cid; 209 pri_t maxglobpri; 210 pri_t cl_maxglobpri; 211 212 maxglobpri = -1; 213 214 /* 215 * Initialize transition lock, which will always be set. 216 */ 217 DISP_LOCK_INIT(&transition_lock); 218 disp_lock_enter_high(&transition_lock); 219 DISP_LOCK_INIT(&stop_lock); 220 221 mutex_enter(&cpu_lock); 222 CPU->cpu_disp->disp_maxrunpri = -1; 223 CPU->cpu_disp->disp_max_unbound_pri = -1; 224 225 /* 226 * Initialize the default CPU partition. 
227 */ 228 cpupart_initialize_default(); 229 /* 230 * Call the class specific initialization functions for 231 * all pre-installed schedulers. 232 * 233 * We pass the size of a class specific parameter 234 * buffer to each of the initialization functions 235 * to try to catch problems with backward compatibility 236 * of class modules. 237 * 238 * For example a new class module running on an old system 239 * which didn't provide sufficiently large parameter buffers 240 * would be bad news. Class initialization modules can check for 241 * this and take action if they detect a problem. 242 */ 243 244 for (cid = 0; cid < nclass; cid++) { 245 sclass_t *sc; 246 247 sc = &sclass[cid]; 248 if (SCHED_INSTALLED(sc)) { 249 cl_maxglobpri = sc->cl_init(cid, PC_CLPARMSZ, 250 &sc->cl_funcs); 251 if (cl_maxglobpri > maxglobpri) 252 maxglobpri = cl_maxglobpri; 253 } 254 } 255 kpreemptpri = (pri_t)v.v_maxsyspri + 1; 256 if (kpqpri == KPQPRI) 257 kpqpri = kpreemptpri; 258 259 ASSERT(maxglobpri >= 0); 260 disp_setup(maxglobpri, 0); 261 262 mutex_exit(&cpu_lock); 263 264 /* 265 * Platform specific sticky scheduler setup. 266 */ 267 if (nosteal_nsec == NOSTEAL_UNINITIALIZED) 268 cmp_set_nosteal_interval(); 269 270 /* 271 * Get the default class ID; this may be later modified via 272 * dispadmin(1M). This will load the class (normally TS) and that will 273 * call disp_add(), which is why we had to drop cpu_lock first. 274 */ 275 if (getcid(defaultclass, &defaultcid) != 0) { 276 cmn_err(CE_PANIC, "Couldn't load default scheduling class '%s'", 277 defaultclass); 278 } 279 } 280 281 /* 282 * disp_add - Called with class pointer to initialize the dispatcher 283 * for a newly loaded class. 284 */ 285 void 286 disp_add(sclass_t *clp) 287 { 288 pri_t maxglobpri; 289 pri_t cl_maxglobpri; 290 291 mutex_enter(&cpu_lock); 292 /* 293 * Initialize the scheduler class. 294 */ 295 maxglobpri = (pri_t)(v.v_nglobpris - LOCK_LEVEL - 1); 296 cl_maxglobpri = clp->cl_init(clp - sclass, PC_CLPARMSZ, &clp->cl_funcs); 297 if (cl_maxglobpri > maxglobpri) 298 maxglobpri = cl_maxglobpri; 299 300 /* 301 * Save old queue information. Since we're initializing a 302 * new scheduling class which has just been loaded, then 303 * the size of the dispq may have changed. We need to handle 304 * that here. 305 */ 306 disp_setup(maxglobpri, v.v_nglobpris); 307 308 mutex_exit(&cpu_lock); 309 } 310 311 312 /* 313 * For each CPU, allocate new dispatch queues 314 * with the stated number of priorities. 315 */ 316 static void 317 cpu_dispqalloc(int numpris) 318 { 319 cpu_t *cpup; 320 struct disp_queue_info *disp_mem; 321 int i, num; 322 323 ASSERT(MUTEX_HELD(&cpu_lock)); 324 325 disp_mem = kmem_zalloc(NCPU * 326 sizeof (struct disp_queue_info), KM_SLEEP); 327 328 /* 329 * This routine must allocate all of the memory before stopping 330 * the cpus because it must not sleep in kmem_alloc while the 331 * CPUs are stopped. Locks they hold will not be freed until they 332 * are restarted. 333 */ 334 i = 0; 335 cpup = cpu_list; 336 do { 337 disp_dq_alloc(&disp_mem[i], numpris, cpup->cpu_disp); 338 i++; 339 cpup = cpup->cpu_next; 340 } while (cpup != cpu_list); 341 num = i; 342 343 pause_cpus(NULL); 344 for (i = 0; i < num; i++) 345 disp_dq_assign(&disp_mem[i], numpris); 346 start_cpus(); 347 348 /* 349 * I must free all of the memory after starting the cpus because 350 * I can not risk sleeping in kmem_free while the cpus are stopped. 
351 */ 352 for (i = 0; i < num; i++) 353 disp_dq_free(&disp_mem[i]); 354 355 kmem_free(disp_mem, NCPU * sizeof (struct disp_queue_info)); 356 } 357 358 static void 359 disp_dq_alloc(struct disp_queue_info *dptr, int numpris, disp_t *dp) 360 { 361 dptr->newdispq = kmem_zalloc(numpris * sizeof (dispq_t), KM_SLEEP); 362 dptr->newdqactmap = kmem_zalloc(((numpris / BT_NBIPUL) + 1) * 363 sizeof (long), KM_SLEEP); 364 dptr->dp = dp; 365 } 366 367 static void 368 disp_dq_assign(struct disp_queue_info *dptr, int numpris) 369 { 370 disp_t *dp; 371 372 dp = dptr->dp; 373 dptr->olddispq = dp->disp_q; 374 dptr->olddqactmap = dp->disp_qactmap; 375 dptr->oldnglobpris = dp->disp_npri; 376 377 ASSERT(dptr->oldnglobpris < numpris); 378 379 if (dptr->olddispq != NULL) { 380 /* 381 * Use kcopy because bcopy is platform-specific 382 * and could block while we might have paused the cpus. 383 */ 384 (void) kcopy(dptr->olddispq, dptr->newdispq, 385 dptr->oldnglobpris * sizeof (dispq_t)); 386 (void) kcopy(dptr->olddqactmap, dptr->newdqactmap, 387 ((dptr->oldnglobpris / BT_NBIPUL) + 1) * 388 sizeof (long)); 389 } 390 dp->disp_q = dptr->newdispq; 391 dp->disp_qactmap = dptr->newdqactmap; 392 dp->disp_q_limit = &dptr->newdispq[numpris]; 393 dp->disp_npri = numpris; 394 } 395 396 static void 397 disp_dq_free(struct disp_queue_info *dptr) 398 { 399 if (dptr->olddispq != NULL) 400 kmem_free(dptr->olddispq, 401 dptr->oldnglobpris * sizeof (dispq_t)); 402 if (dptr->olddqactmap != NULL) 403 kmem_free(dptr->olddqactmap, 404 ((dptr->oldnglobpris / BT_NBIPUL) + 1) * sizeof (long)); 405 } 406 407 /* 408 * For a newly created CPU, initialize the dispatch queue. 409 * This is called before the CPU is known through cpu[] or on any lists. 410 */ 411 void 412 disp_cpu_init(cpu_t *cp) 413 { 414 disp_t *dp; 415 dispq_t *newdispq; 416 ulong_t *newdqactmap; 417 418 ASSERT(MUTEX_HELD(&cpu_lock)); /* protect dispatcher queue sizes */ 419 420 if (cp == cpu0_disp.disp_cpu) 421 dp = &cpu0_disp; 422 else 423 dp = kmem_alloc(sizeof (disp_t), KM_SLEEP); 424 bzero(dp, sizeof (disp_t)); 425 cp->cpu_disp = dp; 426 dp->disp_cpu = cp; 427 dp->disp_maxrunpri = -1; 428 dp->disp_max_unbound_pri = -1; 429 DISP_LOCK_INIT(&cp->cpu_thread_lock); 430 /* 431 * Allocate memory for the dispatcher queue headers 432 * and the active queue bitmap. 433 */ 434 newdispq = kmem_zalloc(v.v_nglobpris * sizeof (dispq_t), KM_SLEEP); 435 newdqactmap = kmem_zalloc(((v.v_nglobpris / BT_NBIPUL) + 1) * 436 sizeof (long), KM_SLEEP); 437 dp->disp_q = newdispq; 438 dp->disp_qactmap = newdqactmap; 439 dp->disp_q_limit = &newdispq[v.v_nglobpris]; 440 dp->disp_npri = v.v_nglobpris; 441 } 442 443 void 444 disp_cpu_fini(cpu_t *cp) 445 { 446 ASSERT(MUTEX_HELD(&cpu_lock)); 447 448 disp_kp_free(cp->cpu_disp); 449 if (cp->cpu_disp != &cpu0_disp) 450 kmem_free(cp->cpu_disp, sizeof (disp_t)); 451 } 452 453 /* 454 * Allocate new, larger kpreempt dispatch queue to replace the old one. 455 */ 456 void 457 disp_kp_alloc(disp_t *dq, pri_t npri) 458 { 459 struct disp_queue_info mem_info; 460 461 if (npri > dq->disp_npri) { 462 /* 463 * Allocate memory for the new array. 464 */ 465 disp_dq_alloc(&mem_info, npri, dq); 466 467 /* 468 * We need to copy the old structures to the new 469 * and free the old. 470 */ 471 disp_dq_assign(&mem_info, npri); 472 disp_dq_free(&mem_info); 473 } 474 } 475 476 /* 477 * Free dispatch queue. 478 * Used for the kpreempt queues for a removed CPU partition and 479 * for the per-CPU queues of deleted CPUs. 
480 */ 481 void 482 disp_kp_free(disp_t *dq) 483 { 484 struct disp_queue_info mem_info; 485 486 mem_info.olddispq = dq->disp_q; 487 mem_info.olddqactmap = dq->disp_qactmap; 488 mem_info.oldnglobpris = dq->disp_npri; 489 disp_dq_free(&mem_info); 490 } 491 492 /* 493 * End dispatcher and scheduler initialization. 494 */ 495 496 /* 497 * See if there's anything to do other than remain idle. 498 * Return non-zero if there is. 499 * 500 * This function must be called with high spl, or with 501 * kernel preemption disabled to prevent the partition's 502 * active cpu list from changing while being traversed. 503 * 504 * This is essentially a simpler version of disp_getwork() 505 * to be called by CPUs preparing to "halt". 506 */ 507 int 508 disp_anywork(void) 509 { 510 cpu_t *cp = CPU; 511 cpu_t *ocp; 512 volatile int *local_nrunnable = &cp->cpu_disp->disp_nrunnable; 513 514 if (!(cp->cpu_flags & CPU_OFFLINE)) { 515 if (CP_MAXRUNPRI(cp->cpu_part) >= 0) 516 return (1); 517 518 for (ocp = cp->cpu_next_part; ocp != cp; 519 ocp = ocp->cpu_next_part) { 520 ASSERT(CPU_ACTIVE(ocp)); 521 522 /* 523 * Something has appeared on the local run queue. 524 */ 525 if (*local_nrunnable > 0) 526 return (1); 527 /* 528 * If we encounter another idle CPU that will 529 * soon be trolling around through disp_anywork() 530 * terminate our walk here and let this other CPU 531 * patrol the next part of the list. 532 */ 533 if (ocp->cpu_dispatch_pri == -1 && 534 (ocp->cpu_disp_flags & CPU_DISP_HALTED) == 0) 535 return (0); 536 /* 537 * Work can be taken from another CPU if: 538 * - There is unbound work on the run queue 539 * - That work isn't a thread undergoing a 540 * - context switch on an otherwise empty queue. 541 * - The CPU isn't running the idle loop. 542 */ 543 if (ocp->cpu_disp->disp_max_unbound_pri != -1 && 544 !((ocp->cpu_disp_flags & CPU_DISP_DONTSTEAL) && 545 ocp->cpu_disp->disp_nrunnable == 1) && 546 ocp->cpu_dispatch_pri != -1) 547 return (1); 548 } 549 } 550 return (0); 551 } 552 553 /* 554 * Called when CPU enters the idle loop 555 */ 556 static void 557 idle_enter() 558 { 559 cpu_t *cp = CPU; 560 561 new_cpu_mstate(CMS_IDLE, gethrtime_unscaled()); 562 CPU_STATS_ADDQ(cp, sys, idlethread, 1); 563 set_idle_cpu(cp->cpu_id); /* arch-dependent hook */ 564 } 565 566 /* 567 * Called when CPU exits the idle loop 568 */ 569 static void 570 idle_exit() 571 { 572 cpu_t *cp = CPU; 573 574 new_cpu_mstate(CMS_SYSTEM, gethrtime_unscaled()); 575 unset_idle_cpu(cp->cpu_id); /* arch-dependent hook */ 576 } 577 578 /* 579 * Idle loop. 580 */ 581 void 582 idle() 583 { 584 struct cpu *cp = CPU; /* pointer to this CPU */ 585 kthread_t *t; /* taken thread */ 586 587 idle_enter(); 588 589 /* 590 * Uniprocessor version of idle loop. 591 * Do this until notified that we're on an actual multiprocessor. 592 */ 593 while (ncpus == 1) { 594 if (cp->cpu_disp->disp_nrunnable == 0) { 595 (*idle_cpu)(); 596 continue; 597 } 598 idle_exit(); 599 swtch(); 600 601 idle_enter(); /* returned from swtch */ 602 } 603 604 /* 605 * Multiprocessor idle loop. 606 */ 607 for (;;) { 608 /* 609 * If CPU is completely quiesced by p_online(2), just wait 610 * here with minimal bus traffic until put online. 
611 */ 612 while (cp->cpu_flags & CPU_QUIESCED) 613 (*idle_cpu)(); 614 615 if (cp->cpu_disp->disp_nrunnable != 0) { 616 idle_exit(); 617 swtch(); 618 } else { 619 if (cp->cpu_flags & CPU_OFFLINE) 620 continue; 621 if ((t = disp_getwork(cp)) == NULL) { 622 if (cp->cpu_chosen_level != -1) { 623 disp_t *dp = cp->cpu_disp; 624 disp_t *kpq; 625 626 disp_lock_enter(&dp->disp_lock); 627 /* 628 * Set kpq under lock to prevent 629 * migration between partitions. 630 */ 631 kpq = &cp->cpu_part->cp_kp_queue; 632 if (kpq->disp_maxrunpri == -1) 633 cp->cpu_chosen_level = -1; 634 disp_lock_exit(&dp->disp_lock); 635 } 636 (*idle_cpu)(); 637 continue; 638 } 639 /* 640 * If there was a thread but we couldn't steal 641 * it, then keep trying. 642 */ 643 if (t == T_DONTSTEAL) 644 continue; 645 idle_exit(); 646 swtch_to(t); 647 } 648 idle_enter(); /* returned from swtch/swtch_to */ 649 } 650 } 651 652 653 /* 654 * Preempt the currently running thread in favor of the highest 655 * priority thread. The class of the current thread controls 656 * where it goes on the dispatcher queues. If panicking, turn 657 * preemption off. 658 */ 659 void 660 preempt() 661 { 662 kthread_t *t = curthread; 663 klwp_t *lwp = ttolwp(curthread); 664 665 if (panicstr) 666 return; 667 668 TRACE_0(TR_FAC_DISP, TR_PREEMPT_START, "preempt_start"); 669 670 thread_lock(t); 671 672 if (t->t_state != TS_ONPROC || t->t_disp_queue != CPU->cpu_disp) { 673 /* 674 * this thread has already been chosen to be run on 675 * another CPU. Clear kprunrun on this CPU since we're 676 * already headed for swtch(). 677 */ 678 CPU->cpu_kprunrun = 0; 679 thread_unlock_nopreempt(t); 680 TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end"); 681 } else { 682 if (lwp != NULL) 683 lwp->lwp_ru.nivcsw++; 684 CPU_STATS_ADDQ(CPU, sys, inv_swtch, 1); 685 THREAD_TRANSITION(t); 686 CL_PREEMPT(t); 687 DTRACE_SCHED(preempt); 688 thread_unlock_nopreempt(t); 689 690 TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end"); 691 692 swtch(); /* clears CPU->cpu_runrun via disp() */ 693 } 694 } 695 696 extern kthread_t *thread_unpin(); 697 698 /* 699 * disp() - find the highest priority thread for this processor to run, and 700 * set it in TS_ONPROC state so that resume() can be called to run it. 701 */ 702 static kthread_t * 703 disp() 704 { 705 cpu_t *cpup; 706 disp_t *dp; 707 kthread_t *tp; 708 dispq_t *dq; 709 int maxrunword; 710 pri_t pri; 711 disp_t *kpq; 712 713 TRACE_0(TR_FAC_DISP, TR_DISP_START, "disp_start"); 714 715 cpup = CPU; 716 /* 717 * Find the highest priority loaded, runnable thread. 718 */ 719 dp = cpup->cpu_disp; 720 721 reschedule: 722 /* 723 * If there is more important work on the global queue with a better 724 * priority than the maximum on this CPU, take it now. 725 */ 726 kpq = &cpup->cpu_part->cp_kp_queue; 727 while ((pri = kpq->disp_maxrunpri) >= 0 && 728 pri >= dp->disp_maxrunpri && 729 (cpup->cpu_flags & CPU_OFFLINE) == 0 && 730 (tp = disp_getbest(kpq)) != NULL) { 731 if (disp_ratify(tp, kpq) != NULL) { 732 TRACE_1(TR_FAC_DISP, TR_DISP_END, 733 "disp_end:tid %p", tp); 734 return (tp); 735 } 736 } 737 738 disp_lock_enter(&dp->disp_lock); 739 pri = dp->disp_maxrunpri; 740 741 /* 742 * If there is nothing to run, look at what's runnable on other queues. 743 * Choose the idle thread if the CPU is quiesced. 744 * Note that CPUs that have the CPU_OFFLINE flag set can still run 745 * interrupt threads, which will be the only threads on the CPU's own 746 * queue, but cannot run threads from other queues. 
747 */ 748 if (pri == -1) { 749 if (!(cpup->cpu_flags & CPU_OFFLINE)) { 750 disp_lock_exit(&dp->disp_lock); 751 if ((tp = disp_getwork(cpup)) == NULL || 752 tp == T_DONTSTEAL) { 753 tp = cpup->cpu_idle_thread; 754 (void) splhigh(); 755 THREAD_ONPROC(tp, cpup); 756 cpup->cpu_dispthread = tp; 757 cpup->cpu_dispatch_pri = -1; 758 cpup->cpu_runrun = cpup->cpu_kprunrun = 0; 759 cpup->cpu_chosen_level = -1; 760 } 761 } else { 762 disp_lock_exit_high(&dp->disp_lock); 763 tp = cpup->cpu_idle_thread; 764 THREAD_ONPROC(tp, cpup); 765 cpup->cpu_dispthread = tp; 766 cpup->cpu_dispatch_pri = -1; 767 cpup->cpu_runrun = cpup->cpu_kprunrun = 0; 768 cpup->cpu_chosen_level = -1; 769 } 770 TRACE_1(TR_FAC_DISP, TR_DISP_END, 771 "disp_end:tid %p", tp); 772 return (tp); 773 } 774 775 dq = &dp->disp_q[pri]; 776 tp = dq->dq_first; 777 778 ASSERT(tp != NULL); 779 ASSERT(tp->t_schedflag & TS_LOAD); /* thread must be swapped in */ 780 781 DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp); 782 783 /* 784 * Found it so remove it from queue. 785 */ 786 dp->disp_nrunnable--; 787 dq->dq_sruncnt--; 788 if ((dq->dq_first = tp->t_link) == NULL) { 789 ulong_t *dqactmap = dp->disp_qactmap; 790 791 ASSERT(dq->dq_sruncnt == 0); 792 dq->dq_last = NULL; 793 794 /* 795 * The queue is empty, so the corresponding bit needs to be 796 * turned off in dqactmap. If nrunnable != 0 just took the 797 * last runnable thread off the 798 * highest queue, so recompute disp_maxrunpri. 799 */ 800 maxrunword = pri >> BT_ULSHIFT; 801 dqactmap[maxrunword] &= ~BT_BIW(pri); 802 803 if (dp->disp_nrunnable == 0) { 804 dp->disp_max_unbound_pri = -1; 805 dp->disp_maxrunpri = -1; 806 } else { 807 int ipri; 808 809 ipri = bt_gethighbit(dqactmap, maxrunword); 810 dp->disp_maxrunpri = ipri; 811 if (ipri < dp->disp_max_unbound_pri) 812 dp->disp_max_unbound_pri = ipri; 813 } 814 } else { 815 tp->t_link = NULL; 816 } 817 818 /* 819 * Set TS_DONT_SWAP flag to prevent another processor from swapping 820 * out this thread before we have a chance to run it. 821 * While running, it is protected against swapping by t_lock. 822 */ 823 tp->t_schedflag |= TS_DONT_SWAP; 824 cpup->cpu_dispthread = tp; /* protected by spl only */ 825 cpup->cpu_dispatch_pri = pri; 826 ASSERT(pri == DISP_PRIO(tp)); 827 thread_onproc(tp, cpup); /* set t_state to TS_ONPROC */ 828 disp_lock_exit_high(&dp->disp_lock); /* drop run queue lock */ 829 830 ASSERT(tp != NULL); 831 TRACE_1(TR_FAC_DISP, TR_DISP_END, 832 "disp_end:tid %p", tp); 833 834 if (disp_ratify(tp, kpq) == NULL) 835 goto reschedule; 836 837 return (tp); 838 } 839 840 /* 841 * swtch() 842 * Find best runnable thread and run it. 843 * Called with the current thread already switched to a new state, 844 * on a sleep queue, run queue, stopped, and not zombied. 845 * May be called at any spl level less than or equal to LOCK_LEVEL. 846 * Always drops spl to the base level (spl0()). 847 */ 848 void 849 swtch() 850 { 851 kthread_t *t = curthread; 852 kthread_t *next; 853 cpu_t *cp; 854 855 TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start"); 856 857 if (t->t_flag & T_INTR_THREAD) 858 cpu_intr_swtch_enter(t); 859 860 if (t->t_intr != NULL) { 861 /* 862 * We are an interrupt thread. Setup and return 863 * the interrupted thread to be resumed. 
864 */ 865 (void) splhigh(); /* block other scheduler action */ 866 cp = CPU; /* now protected against migration */ 867 ASSERT(CPU_ON_INTR(cp) == 0); /* not called with PIL > 10 */ 868 CPU_STATS_ADDQ(cp, sys, pswitch, 1); 869 CPU_STATS_ADDQ(cp, sys, intrblk, 1); 870 next = thread_unpin(); 871 TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start"); 872 resume_from_intr(next); 873 } else { 874 #ifdef DEBUG 875 if (t->t_state == TS_ONPROC && 876 t->t_disp_queue->disp_cpu == CPU && 877 t->t_preempt == 0) { 878 thread_lock(t); 879 ASSERT(t->t_state != TS_ONPROC || 880 t->t_disp_queue->disp_cpu != CPU || 881 t->t_preempt != 0); /* cannot migrate */ 882 thread_unlock_nopreempt(t); 883 } 884 #endif /* DEBUG */ 885 cp = CPU; 886 next = disp(); /* returns with spl high */ 887 ASSERT(CPU_ON_INTR(cp) == 0); /* not called with PIL > 10 */ 888 889 /* OK to steal anything left on run queue */ 890 cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL; 891 892 if (next != t) { 893 if (t == cp->cpu_idle_thread) { 894 PG_NRUN_UPDATE(cp, 1); 895 } else if (next == cp->cpu_idle_thread) { 896 PG_NRUN_UPDATE(cp, -1); 897 } 898 899 /* 900 * If t was previously in the TS_ONPROC state, 901 * setfrontdq and setbackdq won't have set its t_waitrq. 902 * Since we now finally know that we're switching away 903 * from this thread, set its t_waitrq if it is on a run 904 * queue. 905 */ 906 if ((t->t_state == TS_RUN) && (t->t_waitrq == 0)) { 907 t->t_waitrq = gethrtime_unscaled(); 908 } 909 910 /* 911 * restore mstate of thread that we are switching to 912 */ 913 restore_mstate(next); 914 915 CPU_STATS_ADDQ(cp, sys, pswitch, 1); 916 cp->cpu_last_swtch = t->t_disp_time = lbolt; 917 TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start"); 918 919 if (dtrace_vtime_active) 920 dtrace_vtime_switch(next); 921 922 resume(next); 923 /* 924 * The TR_RESUME_END and TR_SWTCH_END trace points 925 * appear at the end of resume(), because we may not 926 * return here 927 */ 928 } else { 929 if (t->t_flag & T_INTR_THREAD) 930 cpu_intr_swtch_exit(t); 931 932 DTRACE_SCHED(remain__cpu); 933 TRACE_0(TR_FAC_DISP, TR_SWTCH_END, "swtch_end"); 934 (void) spl0(); 935 } 936 } 937 } 938 939 /* 940 * swtch_from_zombie() 941 * Special case of swtch(), which allows checks for TS_ZOMB to be 942 * eliminated from normal resume. 943 * Find best runnable thread and run it. 944 * Called with the current thread zombied. 945 * Zombies cannot migrate, so CPU references are safe. 946 */ 947 void 948 swtch_from_zombie() 949 { 950 kthread_t *next; 951 cpu_t *cpu = CPU; 952 953 TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start"); 954 955 ASSERT(curthread->t_state == TS_ZOMB); 956 957 next = disp(); /* returns with spl high */ 958 ASSERT(CPU_ON_INTR(CPU) == 0); /* not called with PIL > 10 */ 959 CPU_STATS_ADDQ(CPU, sys, pswitch, 1); 960 ASSERT(next != curthread); 961 TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start"); 962 963 if (next == cpu->cpu_idle_thread) 964 PG_NRUN_UPDATE(cpu, -1); 965 966 restore_mstate(next); 967 968 if (dtrace_vtime_active) 969 dtrace_vtime_switch(next); 970 971 resume_from_zombie(next); 972 /* 973 * The TR_RESUME_END and TR_SWTCH_END trace points 974 * appear at the end of resume(), because we certainly will not 975 * return here 976 */ 977 } 978 979 #if defined(DEBUG) && (defined(DISP_DEBUG) || defined(lint)) 980 981 /* 982 * search_disp_queues() 983 * Search the given dispatch queues for thread tp. 984 * Return 1 if tp is found, otherwise return 0. 
985 */ 986 static int 987 search_disp_queues(disp_t *dp, kthread_t *tp) 988 { 989 dispq_t *dq; 990 dispq_t *eq; 991 992 disp_lock_enter_high(&dp->disp_lock); 993 994 for (dq = dp->disp_q, eq = dp->disp_q_limit; dq < eq; ++dq) { 995 kthread_t *rp; 996 997 ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL); 998 999 for (rp = dq->dq_first; rp; rp = rp->t_link) 1000 if (tp == rp) { 1001 disp_lock_exit_high(&dp->disp_lock); 1002 return (1); 1003 } 1004 } 1005 disp_lock_exit_high(&dp->disp_lock); 1006 1007 return (0); 1008 } 1009 1010 /* 1011 * thread_on_queue() 1012 * Search all per-CPU dispatch queues and all partition-wide kpreempt 1013 * queues for thread tp. Return 1 if tp is found, otherwise return 0. 1014 */ 1015 static int 1016 thread_on_queue(kthread_t *tp) 1017 { 1018 cpu_t *cp; 1019 struct cpupart *part; 1020 1021 ASSERT(getpil() >= DISP_LEVEL); 1022 1023 /* 1024 * Search the per-CPU dispatch queues for tp. 1025 */ 1026 cp = CPU; 1027 do { 1028 if (search_disp_queues(cp->cpu_disp, tp)) 1029 return (1); 1030 } while ((cp = cp->cpu_next_onln) != CPU); 1031 1032 /* 1033 * Search the partition-wide kpreempt queues for tp. 1034 */ 1035 part = CPU->cpu_part; 1036 do { 1037 if (search_disp_queues(&part->cp_kp_queue, tp)) 1038 return (1); 1039 } while ((part = part->cp_next) != CPU->cpu_part); 1040 1041 return (0); 1042 } 1043 1044 #else 1045 1046 #define thread_on_queue(tp) 0 /* ASSERT must be !thread_on_queue */ 1047 1048 #endif /* DEBUG */ 1049 1050 /* 1051 * like swtch(), but switch to a specified thread taken from another CPU. 1052 * called with spl high.. 1053 */ 1054 void 1055 swtch_to(kthread_t *next) 1056 { 1057 cpu_t *cp = CPU; 1058 1059 TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start"); 1060 1061 /* 1062 * Update context switch statistics. 1063 */ 1064 CPU_STATS_ADDQ(cp, sys, pswitch, 1); 1065 1066 TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start"); 1067 1068 if (curthread == cp->cpu_idle_thread) 1069 PG_NRUN_UPDATE(cp, 1); 1070 1071 /* OK to steal anything left on run queue */ 1072 cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL; 1073 1074 /* record last execution time */ 1075 cp->cpu_last_swtch = curthread->t_disp_time = lbolt; 1076 1077 /* 1078 * If t was previously in the TS_ONPROC state, setfrontdq and setbackdq 1079 * won't have set its t_waitrq. Since we now finally know that we're 1080 * switching away from this thread, set its t_waitrq if it is on a run 1081 * queue. 
1082 */ 1083 if ((curthread->t_state == TS_RUN) && (curthread->t_waitrq == 0)) { 1084 curthread->t_waitrq = gethrtime_unscaled(); 1085 } 1086 1087 /* restore next thread to previously running microstate */ 1088 restore_mstate(next); 1089 1090 if (dtrace_vtime_active) 1091 dtrace_vtime_switch(next); 1092 1093 resume(next); 1094 /* 1095 * The TR_RESUME_END and TR_SWTCH_END trace points 1096 * appear at the end of resume(), because we may not 1097 * return here 1098 */ 1099 } 1100 1101 1102 1103 #define CPU_IDLING(pri) ((pri) == -1) 1104 1105 static void 1106 cpu_resched(cpu_t *cp, pri_t tpri) 1107 { 1108 int call_poke_cpu = 0; 1109 pri_t cpupri = cp->cpu_dispatch_pri; 1110 1111 if (!CPU_IDLING(cpupri) && (cpupri < tpri)) { 1112 TRACE_2(TR_FAC_DISP, TR_CPU_RESCHED, 1113 "CPU_RESCHED:Tpri %d Cpupri %d", tpri, cpupri); 1114 if (tpri >= upreemptpri && cp->cpu_runrun == 0) { 1115 cp->cpu_runrun = 1; 1116 aston(cp->cpu_dispthread); 1117 if (tpri < kpreemptpri && cp != CPU) 1118 call_poke_cpu = 1; 1119 } 1120 if (tpri >= kpreemptpri && cp->cpu_kprunrun == 0) { 1121 cp->cpu_kprunrun = 1; 1122 if (cp != CPU) 1123 call_poke_cpu = 1; 1124 } 1125 } 1126 1127 /* 1128 * Propagate cpu_runrun, and cpu_kprunrun to global visibility. 1129 */ 1130 membar_enter(); 1131 1132 if (call_poke_cpu) 1133 poke_cpu(cp->cpu_id); 1134 } 1135 1136 /* 1137 * setbackdq() keeps runqs balanced such that the difference in length 1138 * between the chosen runq and the next one is no more than RUNQ_MAX_DIFF. 1139 * For threads with priorities below RUNQ_MATCH_PRI levels, the runq's lengths 1140 * must match. When per-thread TS_RUNQMATCH flag is set, setbackdq() will 1141 * try to keep runqs perfectly balanced regardless of the thread priority. 1142 */ 1143 #define RUNQ_MATCH_PRI 16 /* pri below which queue lengths must match */ 1144 #define RUNQ_MAX_DIFF 2 /* maximum runq length difference */ 1145 #define RUNQ_LEN(cp, pri) ((cp)->cpu_disp->disp_q[pri].dq_sruncnt) 1146 1147 /* 1148 * Macro that evaluates to true if it is likely that the thread has cache 1149 * warmth. This is based on the amount of time that has elapsed since the 1150 * thread last ran. If that amount of time is less than "rechoose_interval" 1151 * ticks, then we decide that the thread has enough cache warmth to warrant 1152 * some affinity for t->t_cpu. 1153 */ 1154 #define THREAD_HAS_CACHE_WARMTH(thread) \ 1155 ((thread == curthread) || \ 1156 ((lbolt - thread->t_disp_time) <= rechoose_interval)) 1157 /* 1158 * Put the specified thread on the back of the dispatcher 1159 * queue corresponding to its current priority. 1160 * 1161 * Called with the thread in transition, onproc or stopped state 1162 * and locked (transition implies locked) and at high spl. 1163 * Returns with the thread in TS_RUN state and still locked. 1164 */ 1165 void 1166 setbackdq(kthread_t *tp) 1167 { 1168 dispq_t *dq; 1169 disp_t *dp; 1170 cpu_t *cp; 1171 pri_t tpri; 1172 int bound; 1173 boolean_t self; 1174 1175 ASSERT(THREAD_LOCK_HELD(tp)); 1176 ASSERT((tp->t_schedflag & TS_ALLSTART) == 0); 1177 ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */ 1178 1179 /* 1180 * If thread is "swapped" or on the swap queue don't 1181 * queue it, but wake sched. 
1182 */ 1183 if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) { 1184 disp_swapped_setrun(tp); 1185 return; 1186 } 1187 1188 self = (tp == curthread); 1189 1190 if (tp->t_bound_cpu || tp->t_weakbound_cpu) 1191 bound = 1; 1192 else 1193 bound = 0; 1194 1195 tpri = DISP_PRIO(tp); 1196 if (ncpus == 1) 1197 cp = tp->t_cpu; 1198 else if (!bound) { 1199 if (tpri >= kpqpri) { 1200 setkpdq(tp, SETKP_BACK); 1201 return; 1202 } 1203 1204 /* 1205 * We'll generally let this thread continue to run where 1206 * it last ran...but will consider migration if: 1207 * - We thread probably doesn't have much cache warmth. 1208 * - The CPU where it last ran is the target of an offline 1209 * request. 1210 * - The thread last ran outside it's home lgroup. 1211 */ 1212 if ((!THREAD_HAS_CACHE_WARMTH(tp)) || 1213 (tp->t_cpu == cpu_inmotion)) { 1214 cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri, NULL); 1215 } else if (!LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, tp->t_cpu)) { 1216 cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri, 1217 self ? tp->t_cpu : NULL); 1218 } else { 1219 cp = tp->t_cpu; 1220 } 1221 1222 if (tp->t_cpupart == cp->cpu_part) { 1223 int qlen; 1224 1225 /* 1226 * Perform any CMT load balancing 1227 */ 1228 cp = cmt_balance(tp, cp); 1229 1230 /* 1231 * Balance across the run queues 1232 */ 1233 qlen = RUNQ_LEN(cp, tpri); 1234 if (tpri >= RUNQ_MATCH_PRI && 1235 !(tp->t_schedflag & TS_RUNQMATCH)) 1236 qlen -= RUNQ_MAX_DIFF; 1237 if (qlen > 0) { 1238 cpu_t *newcp; 1239 1240 if (tp->t_lpl->lpl_lgrpid == LGRP_ROOTID) { 1241 newcp = cp->cpu_next_part; 1242 } else if ((newcp = cp->cpu_next_lpl) == cp) { 1243 newcp = cp->cpu_next_part; 1244 } 1245 1246 if (RUNQ_LEN(newcp, tpri) < qlen) { 1247 DTRACE_PROBE3(runq__balance, 1248 kthread_t *, tp, 1249 cpu_t *, cp, cpu_t *, newcp); 1250 cp = newcp; 1251 } 1252 } 1253 } else { 1254 /* 1255 * Migrate to a cpu in the new partition. 1256 */ 1257 cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist, 1258 tp->t_lpl, tp->t_pri, NULL); 1259 } 1260 ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 1261 } else { 1262 /* 1263 * It is possible that t_weakbound_cpu != t_bound_cpu (for 1264 * a short time until weak binding that existed when the 1265 * strong binding was established has dropped) so we must 1266 * favour weak binding over strong. 1267 */ 1268 cp = tp->t_weakbound_cpu ? 1269 tp->t_weakbound_cpu : tp->t_bound_cpu; 1270 } 1271 /* 1272 * A thread that is ONPROC may be temporarily placed on the run queue 1273 * but then chosen to run again by disp. If the thread we're placing on 1274 * the queue is in TS_ONPROC state, don't set its t_waitrq until a 1275 * replacement process is actually scheduled in swtch(). In this 1276 * situation, curthread is the only thread that could be in the ONPROC 1277 * state. 
1278 */ 1279 if ((!self) && (tp->t_waitrq == 0)) { 1280 hrtime_t curtime; 1281 1282 curtime = gethrtime_unscaled(); 1283 (void) cpu_update_pct(tp, curtime); 1284 tp->t_waitrq = curtime; 1285 } else { 1286 (void) cpu_update_pct(tp, gethrtime_unscaled()); 1287 } 1288 1289 dp = cp->cpu_disp; 1290 disp_lock_enter_high(&dp->disp_lock); 1291 1292 DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 0); 1293 TRACE_3(TR_FAC_DISP, TR_BACKQ, "setbackdq:pri %d cpu %p tid %p", 1294 tpri, cp, tp); 1295 1296 #ifndef NPROBE 1297 /* Kernel probe */ 1298 if (tnf_tracing_active) 1299 tnf_thread_queue(tp, cp, tpri); 1300 #endif /* NPROBE */ 1301 1302 ASSERT(tpri >= 0 && tpri < dp->disp_npri); 1303 1304 THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */ 1305 tp->t_disp_queue = dp; 1306 tp->t_link = NULL; 1307 1308 dq = &dp->disp_q[tpri]; 1309 dp->disp_nrunnable++; 1310 if (!bound) 1311 dp->disp_steal = 0; 1312 membar_enter(); 1313 1314 if (dq->dq_sruncnt++ != 0) { 1315 ASSERT(dq->dq_first != NULL); 1316 dq->dq_last->t_link = tp; 1317 dq->dq_last = tp; 1318 } else { 1319 ASSERT(dq->dq_first == NULL); 1320 ASSERT(dq->dq_last == NULL); 1321 dq->dq_first = dq->dq_last = tp; 1322 BT_SET(dp->disp_qactmap, tpri); 1323 if (tpri > dp->disp_maxrunpri) { 1324 dp->disp_maxrunpri = tpri; 1325 membar_enter(); 1326 cpu_resched(cp, tpri); 1327 } 1328 } 1329 1330 if (!bound && tpri > dp->disp_max_unbound_pri) { 1331 if (self && dp->disp_max_unbound_pri == -1 && cp == CPU) { 1332 /* 1333 * If there are no other unbound threads on the 1334 * run queue, don't allow other CPUs to steal 1335 * this thread while we are in the middle of a 1336 * context switch. We may just switch to it 1337 * again right away. CPU_DISP_DONTSTEAL is cleared 1338 * in swtch and swtch_to. 1339 */ 1340 cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL; 1341 } 1342 dp->disp_max_unbound_pri = tpri; 1343 } 1344 (*disp_enq_thread)(cp, bound); 1345 } 1346 1347 /* 1348 * Put the specified thread on the front of the dispatcher 1349 * queue corresponding to its current priority. 1350 * 1351 * Called with the thread in transition, onproc or stopped state 1352 * and locked (transition implies locked) and at high spl. 1353 * Returns with the thread in TS_RUN state and still locked. 1354 */ 1355 void 1356 setfrontdq(kthread_t *tp) 1357 { 1358 disp_t *dp; 1359 dispq_t *dq; 1360 cpu_t *cp; 1361 pri_t tpri; 1362 int bound; 1363 1364 ASSERT(THREAD_LOCK_HELD(tp)); 1365 ASSERT((tp->t_schedflag & TS_ALLSTART) == 0); 1366 ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */ 1367 1368 /* 1369 * If thread is "swapped" or on the swap queue don't 1370 * queue it, but wake sched. 1371 */ 1372 if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) { 1373 disp_swapped_setrun(tp); 1374 return; 1375 } 1376 1377 if (tp->t_bound_cpu || tp->t_weakbound_cpu) 1378 bound = 1; 1379 else 1380 bound = 0; 1381 1382 tpri = DISP_PRIO(tp); 1383 if (ncpus == 1) 1384 cp = tp->t_cpu; 1385 else if (!bound) { 1386 if (tpri >= kpqpri) { 1387 setkpdq(tp, SETKP_FRONT); 1388 return; 1389 } 1390 cp = tp->t_cpu; 1391 if (tp->t_cpupart == cp->cpu_part) { 1392 /* 1393 * We'll generally let this thread continue to run 1394 * where it last ran, but will consider migration if: 1395 * - The thread last ran outside it's home lgroup. 1396 * - The CPU where it last ran is the target of an 1397 * offline request (a thread_nomigrate() on the in 1398 * motion CPU relies on this when forcing a preempt). 
1399 * - The thread isn't the highest priority thread where 1400 * it last ran, and it is considered not likely to 1401 * have significant cache warmth. 1402 */ 1403 if ((!LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, cp)) || 1404 (cp == cpu_inmotion)) { 1405 cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri, 1406 (tp == curthread) ? cp : NULL); 1407 } else if ((tpri < cp->cpu_disp->disp_maxrunpri) && 1408 (!THREAD_HAS_CACHE_WARMTH(tp))) { 1409 cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri, 1410 NULL); 1411 } 1412 } else { 1413 /* 1414 * Migrate to a cpu in the new partition. 1415 */ 1416 cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist, 1417 tp->t_lpl, tp->t_pri, NULL); 1418 } 1419 ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 1420 } else { 1421 /* 1422 * It is possible that t_weakbound_cpu != t_bound_cpu (for 1423 * a short time until weak binding that existed when the 1424 * strong binding was established has dropped) so we must 1425 * favour weak binding over strong. 1426 */ 1427 cp = tp->t_weakbound_cpu ? 1428 tp->t_weakbound_cpu : tp->t_bound_cpu; 1429 } 1430 1431 /* 1432 * A thread that is ONPROC may be temporarily placed on the run queue 1433 * but then chosen to run again by disp. If the thread we're placing on 1434 * the queue is in TS_ONPROC state, don't set its t_waitrq until a 1435 * replacement process is actually scheduled in swtch(). In this 1436 * situation, curthread is the only thread that could be in the ONPROC 1437 * state. 1438 */ 1439 if ((tp != curthread) && (tp->t_waitrq == 0)) { 1440 hrtime_t curtime; 1441 1442 curtime = gethrtime_unscaled(); 1443 (void) cpu_update_pct(tp, curtime); 1444 tp->t_waitrq = curtime; 1445 } else { 1446 (void) cpu_update_pct(tp, gethrtime_unscaled()); 1447 } 1448 1449 dp = cp->cpu_disp; 1450 disp_lock_enter_high(&dp->disp_lock); 1451 1452 TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp); 1453 DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 1); 1454 1455 #ifndef NPROBE 1456 /* Kernel probe */ 1457 if (tnf_tracing_active) 1458 tnf_thread_queue(tp, cp, tpri); 1459 #endif /* NPROBE */ 1460 1461 ASSERT(tpri >= 0 && tpri < dp->disp_npri); 1462 1463 THREAD_RUN(tp, &dp->disp_lock); /* set TS_RUN state and lock */ 1464 tp->t_disp_queue = dp; 1465 1466 dq = &dp->disp_q[tpri]; 1467 dp->disp_nrunnable++; 1468 if (!bound) 1469 dp->disp_steal = 0; 1470 membar_enter(); 1471 1472 if (dq->dq_sruncnt++ != 0) { 1473 ASSERT(dq->dq_last != NULL); 1474 tp->t_link = dq->dq_first; 1475 dq->dq_first = tp; 1476 } else { 1477 ASSERT(dq->dq_last == NULL); 1478 ASSERT(dq->dq_first == NULL); 1479 tp->t_link = NULL; 1480 dq->dq_first = dq->dq_last = tp; 1481 BT_SET(dp->disp_qactmap, tpri); 1482 if (tpri > dp->disp_maxrunpri) { 1483 dp->disp_maxrunpri = tpri; 1484 membar_enter(); 1485 cpu_resched(cp, tpri); 1486 } 1487 } 1488 1489 if (!bound && tpri > dp->disp_max_unbound_pri) { 1490 if (tp == curthread && dp->disp_max_unbound_pri == -1 && 1491 cp == CPU) { 1492 /* 1493 * If there are no other unbound threads on the 1494 * run queue, don't allow other CPUs to steal 1495 * this thread while we are in the middle of a 1496 * context switch. We may just switch to it 1497 * again right away. CPU_DISP_DONTSTEAL is cleared 1498 * in swtch and swtch_to. 
1499 */ 1500 cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL; 1501 } 1502 dp->disp_max_unbound_pri = tpri; 1503 } 1504 (*disp_enq_thread)(cp, bound); 1505 } 1506 1507 /* 1508 * Put a high-priority unbound thread on the kp queue 1509 */ 1510 static void 1511 setkpdq(kthread_t *tp, int borf) 1512 { 1513 dispq_t *dq; 1514 disp_t *dp; 1515 cpu_t *cp; 1516 pri_t tpri; 1517 1518 tpri = DISP_PRIO(tp); 1519 1520 dp = &tp->t_cpupart->cp_kp_queue; 1521 disp_lock_enter_high(&dp->disp_lock); 1522 1523 TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp); 1524 1525 ASSERT(tpri >= 0 && tpri < dp->disp_npri); 1526 DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, borf); 1527 THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */ 1528 tp->t_disp_queue = dp; 1529 dp->disp_nrunnable++; 1530 dq = &dp->disp_q[tpri]; 1531 1532 if (dq->dq_sruncnt++ != 0) { 1533 if (borf == SETKP_BACK) { 1534 ASSERT(dq->dq_first != NULL); 1535 tp->t_link = NULL; 1536 dq->dq_last->t_link = tp; 1537 dq->dq_last = tp; 1538 } else { 1539 ASSERT(dq->dq_last != NULL); 1540 tp->t_link = dq->dq_first; 1541 dq->dq_first = tp; 1542 } 1543 } else { 1544 if (borf == SETKP_BACK) { 1545 ASSERT(dq->dq_first == NULL); 1546 ASSERT(dq->dq_last == NULL); 1547 dq->dq_first = dq->dq_last = tp; 1548 } else { 1549 ASSERT(dq->dq_last == NULL); 1550 ASSERT(dq->dq_first == NULL); 1551 tp->t_link = NULL; 1552 dq->dq_first = dq->dq_last = tp; 1553 } 1554 BT_SET(dp->disp_qactmap, tpri); 1555 if (tpri > dp->disp_max_unbound_pri) 1556 dp->disp_max_unbound_pri = tpri; 1557 if (tpri > dp->disp_maxrunpri) { 1558 dp->disp_maxrunpri = tpri; 1559 membar_enter(); 1560 } 1561 } 1562 1563 cp = tp->t_cpu; 1564 if (tp->t_cpupart != cp->cpu_part) { 1565 /* migrate to a cpu in the new partition */ 1566 cp = tp->t_cpupart->cp_cpulist; 1567 } 1568 cp = disp_lowpri_cpu(cp, tp->t_lpl, tp->t_pri, NULL); 1569 disp_lock_enter_high(&cp->cpu_disp->disp_lock); 1570 ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 1571 1572 #ifndef NPROBE 1573 /* Kernel probe */ 1574 if (tnf_tracing_active) 1575 tnf_thread_queue(tp, cp, tpri); 1576 #endif /* NPROBE */ 1577 1578 if (cp->cpu_chosen_level < tpri) 1579 cp->cpu_chosen_level = tpri; 1580 cpu_resched(cp, tpri); 1581 disp_lock_exit_high(&cp->cpu_disp->disp_lock); 1582 (*disp_enq_thread)(cp, 0); 1583 } 1584 1585 /* 1586 * Remove a thread from the dispatcher queue if it is on it. 1587 * It is not an error if it is not found but we return whether 1588 * or not it was found in case the caller wants to check. 1589 */ 1590 int 1591 dispdeq(kthread_t *tp) 1592 { 1593 disp_t *dp; 1594 dispq_t *dq; 1595 kthread_t *rp; 1596 kthread_t *trp; 1597 kthread_t **ptp; 1598 int tpri; 1599 1600 ASSERT(THREAD_LOCK_HELD(tp)); 1601 1602 if (tp->t_state != TS_RUN) 1603 return (0); 1604 1605 /* 1606 * The thread is "swapped" or is on the swap queue and 1607 * hence no longer on the run queue, so return true. 1608 */ 1609 if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) 1610 return (1); 1611 1612 tpri = DISP_PRIO(tp); 1613 dp = tp->t_disp_queue; 1614 ASSERT(tpri < dp->disp_npri); 1615 dq = &dp->disp_q[tpri]; 1616 ptp = &dq->dq_first; 1617 rp = *ptp; 1618 trp = NULL; 1619 1620 ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL); 1621 1622 /* 1623 * Search for thread in queue. 1624 * Double links would simplify this at the expense of disp/setrun. 
1625 */ 1626 while (rp != tp && rp != NULL) { 1627 trp = rp; 1628 ptp = &trp->t_link; 1629 rp = trp->t_link; 1630 } 1631 1632 if (rp == NULL) { 1633 panic("dispdeq: thread not on queue"); 1634 } 1635 1636 DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp); 1637 1638 /* 1639 * Found it so remove it from queue. 1640 */ 1641 if ((*ptp = rp->t_link) == NULL) 1642 dq->dq_last = trp; 1643 1644 dp->disp_nrunnable--; 1645 if (--dq->dq_sruncnt == 0) { 1646 dp->disp_qactmap[tpri >> BT_ULSHIFT] &= ~BT_BIW(tpri); 1647 if (dp->disp_nrunnable == 0) { 1648 dp->disp_max_unbound_pri = -1; 1649 dp->disp_maxrunpri = -1; 1650 } else if (tpri == dp->disp_maxrunpri) { 1651 int ipri; 1652 1653 ipri = bt_gethighbit(dp->disp_qactmap, 1654 dp->disp_maxrunpri >> BT_ULSHIFT); 1655 if (ipri < dp->disp_max_unbound_pri) 1656 dp->disp_max_unbound_pri = ipri; 1657 dp->disp_maxrunpri = ipri; 1658 } 1659 } 1660 tp->t_link = NULL; 1661 THREAD_TRANSITION(tp); /* put in intermediate state */ 1662 return (1); 1663 } 1664 1665 1666 /* 1667 * dq_sruninc and dq_srundec are public functions for 1668 * incrementing/decrementing the sruncnts when a thread on 1669 * a dispatcher queue is made schedulable/unschedulable by 1670 * resetting the TS_LOAD flag. 1671 * 1672 * The caller MUST have the thread lock and therefore the dispatcher 1673 * queue lock so that the operation which changes 1674 * the flag, the operation that checks the status of the thread to 1675 * determine if it's on a disp queue AND the call to this function 1676 * are one atomic operation with respect to interrupts. 1677 */ 1678 1679 /* 1680 * Called by sched AFTER TS_LOAD flag is set on a swapped, runnable thread. 1681 */ 1682 void 1683 dq_sruninc(kthread_t *t) 1684 { 1685 ASSERT(t->t_state == TS_RUN); 1686 ASSERT(t->t_schedflag & TS_LOAD); 1687 1688 THREAD_TRANSITION(t); 1689 setfrontdq(t); 1690 } 1691 1692 /* 1693 * See comment on calling conventions above. 1694 * Called by sched BEFORE TS_LOAD flag is cleared on a runnable thread. 1695 */ 1696 void 1697 dq_srundec(kthread_t *t) 1698 { 1699 ASSERT(t->t_schedflag & TS_LOAD); 1700 1701 (void) dispdeq(t); 1702 disp_swapped_enq(t); 1703 } 1704 1705 /* 1706 * Change the dispatcher lock of thread to the "swapped_lock" 1707 * and return with thread lock still held. 1708 * 1709 * Called with thread_lock held, in transition state, and at high spl. 1710 */ 1711 void 1712 disp_swapped_enq(kthread_t *tp) 1713 { 1714 ASSERT(THREAD_LOCK_HELD(tp)); 1715 ASSERT(tp->t_schedflag & TS_LOAD); 1716 1717 switch (tp->t_state) { 1718 case TS_RUN: 1719 disp_lock_enter_high(&swapped_lock); 1720 THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */ 1721 break; 1722 case TS_ONPROC: 1723 disp_lock_enter_high(&swapped_lock); 1724 THREAD_TRANSITION(tp); 1725 wake_sched_sec = 1; /* tell clock to wake sched */ 1726 THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */ 1727 break; 1728 default: 1729 panic("disp_swapped: tp: %p bad t_state", (void *)tp); 1730 } 1731 } 1732 1733 /* 1734 * This routine is called by setbackdq/setfrontdq if the thread is 1735 * not loaded or loaded and on the swap queue. 1736 * 1737 * Thread state TS_SLEEP implies that a swapped thread 1738 * has been woken up and needs to be swapped in by the swapper. 1739 * 1740 * Thread state TS_RUN, it implies that the priority of a swapped 1741 * thread is being increased by scheduling class (e.g. ts_update). 
1742 */ 1743 static void 1744 disp_swapped_setrun(kthread_t *tp) 1745 { 1746 ASSERT(THREAD_LOCK_HELD(tp)); 1747 ASSERT((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD); 1748 1749 switch (tp->t_state) { 1750 case TS_SLEEP: 1751 disp_lock_enter_high(&swapped_lock); 1752 /* 1753 * Wakeup sched immediately (i.e., next tick) if the 1754 * thread priority is above maxclsyspri. 1755 */ 1756 if (DISP_PRIO(tp) > maxclsyspri) 1757 wake_sched = 1; 1758 else 1759 wake_sched_sec = 1; 1760 THREAD_RUN(tp, &swapped_lock); /* set TS_RUN state and lock */ 1761 break; 1762 case TS_RUN: /* called from ts_update */ 1763 break; 1764 default: 1765 panic("disp_swapped_setrun: tp: %p bad t_state", (void *)tp); 1766 } 1767 } 1768 1769 1770 /* 1771 * Make a thread give up its processor. Find the processor on 1772 * which this thread is executing, and have that processor 1773 * preempt. 1774 */ 1775 void 1776 cpu_surrender(kthread_t *tp) 1777 { 1778 cpu_t *cpup; 1779 int max_pri; 1780 int max_run_pri; 1781 klwp_t *lwp; 1782 1783 ASSERT(THREAD_LOCK_HELD(tp)); 1784 1785 if (tp->t_state != TS_ONPROC) 1786 return; 1787 cpup = tp->t_disp_queue->disp_cpu; /* CPU thread dispatched to */ 1788 max_pri = cpup->cpu_disp->disp_maxrunpri; /* best pri of that CPU */ 1789 max_run_pri = CP_MAXRUNPRI(cpup->cpu_part); 1790 if (max_pri < max_run_pri) 1791 max_pri = max_run_pri; 1792 1793 cpup->cpu_runrun = 1; 1794 if (max_pri >= kpreemptpri && cpup->cpu_kprunrun == 0) { 1795 cpup->cpu_kprunrun = 1; 1796 } 1797 1798 /* 1799 * Propagate cpu_runrun, and cpu_kprunrun to global visibility. 1800 */ 1801 membar_enter(); 1802 1803 DTRACE_SCHED1(surrender, kthread_t *, tp); 1804 1805 /* 1806 * Make the target thread take an excursion through trap() 1807 * to do preempt() (unless we're already in trap or post_syscall, 1808 * calling cpu_surrender via CL_TRAPRET). 1809 */ 1810 if (tp != curthread || (lwp = tp->t_lwp) == NULL || 1811 lwp->lwp_state != LWP_USER) { 1812 aston(tp); 1813 if (cpup != CPU) 1814 poke_cpu(cpup->cpu_id); 1815 } 1816 TRACE_2(TR_FAC_DISP, TR_CPU_SURRENDER, 1817 "cpu_surrender:tid %p cpu %p", tp, cpup); 1818 } 1819 1820 1821 /* 1822 * Commit to and ratify a scheduling decision 1823 */ 1824 /*ARGSUSED*/ 1825 static kthread_t * 1826 disp_ratify(kthread_t *tp, disp_t *kpq) 1827 { 1828 pri_t tpri, maxpri; 1829 pri_t maxkpri; 1830 cpu_t *cpup; 1831 1832 ASSERT(tp != NULL); 1833 /* 1834 * Commit to, then ratify scheduling decision 1835 */ 1836 cpup = CPU; 1837 if (cpup->cpu_runrun != 0) 1838 cpup->cpu_runrun = 0; 1839 if (cpup->cpu_kprunrun != 0) 1840 cpup->cpu_kprunrun = 0; 1841 if (cpup->cpu_chosen_level != -1) 1842 cpup->cpu_chosen_level = -1; 1843 membar_enter(); 1844 tpri = DISP_PRIO(tp); 1845 maxpri = cpup->cpu_disp->disp_maxrunpri; 1846 maxkpri = kpq->disp_maxrunpri; 1847 if (maxpri < maxkpri) 1848 maxpri = maxkpri; 1849 if (tpri < maxpri) { 1850 /* 1851 * should have done better 1852 * put this one back and indicate to try again 1853 */ 1854 cpup->cpu_dispthread = curthread; /* fixup dispthread */ 1855 cpup->cpu_dispatch_pri = DISP_PRIO(curthread); 1856 thread_lock_high(tp); 1857 THREAD_TRANSITION(tp); 1858 setfrontdq(tp); 1859 thread_unlock_nopreempt(tp); 1860 1861 tp = NULL; 1862 } 1863 return (tp); 1864 } 1865 1866 /* 1867 * See if there is any work on the dispatcher queue for other CPUs. 1868 * If there is, dequeue the best thread and return. 
1869 */ 1870 static kthread_t * 1871 disp_getwork(cpu_t *cp) 1872 { 1873 cpu_t *ocp; /* other CPU */ 1874 cpu_t *ocp_start; 1875 cpu_t *tcp; /* target local CPU */ 1876 kthread_t *tp; 1877 kthread_t *retval = NULL; 1878 pri_t maxpri; 1879 disp_t *kpq; /* kp queue for this partition */ 1880 lpl_t *lpl, *lpl_leaf; 1881 int leafidx, startidx; 1882 hrtime_t stealtime; 1883 lgrp_id_t local_id; 1884 1885 maxpri = -1; 1886 tcp = NULL; 1887 1888 kpq = &cp->cpu_part->cp_kp_queue; 1889 while (kpq->disp_maxrunpri >= 0) { 1890 /* 1891 * Try to take a thread from the kp_queue. 1892 */ 1893 tp = (disp_getbest(kpq)); 1894 if (tp) 1895 return (disp_ratify(tp, kpq)); 1896 } 1897 1898 kpreempt_disable(); /* protect the cpu_active list */ 1899 1900 /* 1901 * Try to find something to do on another CPU's run queue. 1902 * Loop through all other CPUs looking for the one with the highest 1903 * priority unbound thread. 1904 * 1905 * On NUMA machines, the partition's CPUs are consulted in order of 1906 * distance from the current CPU. This way, the first available 1907 * work found is also the closest, and will suffer the least 1908 * from being migrated. 1909 */ 1910 lpl = lpl_leaf = cp->cpu_lpl; 1911 local_id = lpl_leaf->lpl_lgrpid; 1912 leafidx = startidx = 0; 1913 1914 /* 1915 * This loop traverses the lpl hierarchy. Higher level lpls represent 1916 * broader levels of locality 1917 */ 1918 do { 1919 /* This loop iterates over the lpl's leaves */ 1920 do { 1921 if (lpl_leaf != cp->cpu_lpl) 1922 ocp = lpl_leaf->lpl_cpus; 1923 else 1924 ocp = cp->cpu_next_lpl; 1925 1926 /* This loop iterates over the CPUs in the leaf */ 1927 ocp_start = ocp; 1928 do { 1929 pri_t pri; 1930 1931 ASSERT(CPU_ACTIVE(ocp)); 1932 1933 /* 1934 * End our stroll around this lpl if: 1935 * 1936 * - Something became runnable on the local 1937 * queue...which also ends our stroll around 1938 * the partition. 1939 * 1940 * - We happen across another idle CPU. 1941 * Since it is patrolling the next portion 1942 * of the lpl's list (assuming it's not 1943 * halted, or busy servicing an interrupt), 1944 * move to the next higher level of locality. 1945 */ 1946 if (cp->cpu_disp->disp_nrunnable != 0) { 1947 kpreempt_enable(); 1948 return (NULL); 1949 } 1950 if (ocp->cpu_dispatch_pri == -1) { 1951 if (ocp->cpu_disp_flags & 1952 CPU_DISP_HALTED || 1953 ocp->cpu_intr_actv != 0) 1954 continue; 1955 else 1956 goto next_level; 1957 } 1958 1959 /* 1960 * If there's only one thread and the CPU 1961 * is in the middle of a context switch, 1962 * or it's currently running the idle thread, 1963 * don't steal it. 1964 */ 1965 if ((ocp->cpu_disp_flags & 1966 CPU_DISP_DONTSTEAL) && 1967 ocp->cpu_disp->disp_nrunnable == 1) 1968 continue; 1969 1970 pri = ocp->cpu_disp->disp_max_unbound_pri; 1971 if (pri > maxpri) { 1972 /* 1973 * Don't steal threads that we attempted 1974 * to steal recently until they're ready 1975 * to be stolen again. 1976 */ 1977 stealtime = ocp->cpu_disp->disp_steal; 1978 if (stealtime == 0 || 1979 stealtime - gethrtime() <= 0) { 1980 maxpri = pri; 1981 tcp = ocp; 1982 } else { 1983 /* 1984 * Don't update tcp, just set 1985 * the retval to T_DONTSTEAL, so 1986 * that if no acceptable CPUs 1987 * are found the return value 1988 * will be T_DONTSTEAL rather 1989 * then NULL. 1990 */ 1991 retval = T_DONTSTEAL; 1992 } 1993 } 1994 } while ((ocp = ocp->cpu_next_lpl) != ocp_start); 1995 1996 /* 1997 * Iterate to the next leaf lpl in the resource set 1998 * at this level of locality. 
If we hit the end of 1999 * the set, wrap back around to the beginning. 2000 * 2001 * Note: This iteration is NULL terminated for a reason 2002 * see lpl_topo_bootstrap() in lgrp.c for details. 2003 */ 2004 if ((lpl_leaf = lpl->lpl_rset[++leafidx]) == NULL) { 2005 leafidx = 0; 2006 lpl_leaf = lpl->lpl_rset[leafidx]; 2007 } 2008 } while (leafidx != startidx); 2009 2010 next_level: 2011 /* 2012 * Expand the search to include farther away CPUs (next 2013 * locality level). The closer CPUs that have already been 2014 * checked will be checked again. In doing so, idle CPUs 2015 * will tend to be more aggresive about stealing from CPUs 2016 * that are closer (since the closer CPUs will be considered 2017 * more often). 2018 * Begin at this level with the CPUs local leaf lpl. 2019 */ 2020 if ((lpl = lpl->lpl_parent) != NULL) { 2021 leafidx = startidx = lpl->lpl_id2rset[local_id]; 2022 lpl_leaf = lpl->lpl_rset[leafidx]; 2023 } 2024 } while (!tcp && lpl); 2025 2026 kpreempt_enable(); 2027 2028 /* 2029 * If another queue looks good, and there is still nothing on 2030 * the local queue, try to transfer one or more threads 2031 * from it to our queue. 2032 */ 2033 if (tcp && cp->cpu_disp->disp_nrunnable == 0) { 2034 tp = disp_getbest(tcp->cpu_disp); 2035 if (tp == NULL || tp == T_DONTSTEAL) 2036 return (tp); 2037 return (disp_ratify(tp, kpq)); 2038 } 2039 return (retval); 2040 } 2041 2042 2043 /* 2044 * disp_fix_unbound_pri() 2045 * Determines the maximum priority of unbound threads on the queue. 2046 * The priority is kept for the queue, but is only increased, never 2047 * reduced unless some CPU is looking for something on that queue. 2048 * 2049 * The priority argument is the known upper limit. 2050 * 2051 * Perhaps this should be kept accurately, but that probably means 2052 * separate bitmaps for bound and unbound threads. Since only idled 2053 * CPUs will have to do this recalculation, it seems better this way. 2054 */ 2055 static void 2056 disp_fix_unbound_pri(disp_t *dp, pri_t pri) 2057 { 2058 kthread_t *tp; 2059 dispq_t *dq; 2060 ulong_t *dqactmap = dp->disp_qactmap; 2061 ulong_t mapword; 2062 int wx; 2063 2064 ASSERT(DISP_LOCK_HELD(&dp->disp_lock)); 2065 2066 ASSERT(pri >= 0); /* checked by caller */ 2067 2068 /* 2069 * Start the search at the next lowest priority below the supplied 2070 * priority. This depends on the bitmap implementation. 2071 */ 2072 do { 2073 wx = pri >> BT_ULSHIFT; /* index of word in map */ 2074 2075 /* 2076 * Form mask for all lower priorities in the word. 2077 */ 2078 mapword = dqactmap[wx] & (BT_BIW(pri) - 1); 2079 2080 /* 2081 * Get next lower active priority. 2082 */ 2083 if (mapword != 0) { 2084 pri = (wx << BT_ULSHIFT) + highbit(mapword) - 1; 2085 } else if (wx > 0) { 2086 pri = bt_gethighbit(dqactmap, wx - 1); /* sign extend */ 2087 if (pri < 0) 2088 break; 2089 } else { 2090 pri = -1; 2091 break; 2092 } 2093 2094 /* 2095 * Search the queue for unbound, runnable threads. 2096 */ 2097 dq = &dp->disp_q[pri]; 2098 tp = dq->dq_first; 2099 2100 while (tp && (tp->t_bound_cpu || tp->t_weakbound_cpu)) { 2101 tp = tp->t_link; 2102 } 2103 2104 /* 2105 * If a thread was found, set the priority and return. 2106 */ 2107 } while (tp == NULL); 2108 2109 /* 2110 * pri holds the maximum unbound thread priority or -1. 
2072 do {
2073 wx = pri >> BT_ULSHIFT; /* index of word in map */
2074
2075 /*
2076 * Form mask for all lower priorities in the word.
2077 */
2078 mapword = dqactmap[wx] & (BT_BIW(pri) - 1);
2079
2080 /*
2081 * Get next lower active priority.
2082 */
2083 if (mapword != 0) {
2084 pri = (wx << BT_ULSHIFT) + highbit(mapword) - 1;
2085 } else if (wx > 0) {
2086 pri = bt_gethighbit(dqactmap, wx - 1); /* sign extend */
2087 if (pri < 0)
2088 break;
2089 } else {
2090 pri = -1;
2091 break;
2092 }
2093
2094 /*
2095 * Search the queue for unbound, runnable threads.
2096 */
2097 dq = &dp->disp_q[pri];
2098 tp = dq->dq_first;
2099
2100 while (tp && (tp->t_bound_cpu || tp->t_weakbound_cpu)) {
2101 tp = tp->t_link;
2102 }
2103
2104 /*
2105 * If a thread was found, set the priority and return.
2106 */
2107 } while (tp == NULL);
2108
2109 /*
2110 * pri holds the maximum unbound thread priority or -1.
2111 */
2112 if (dp->disp_max_unbound_pri != pri)
2113 dp->disp_max_unbound_pri = pri;
2114 }
2115
2116 /*
2117 * disp_adjust_unbound_pri() - thread is becoming unbound, so we should
2118 * check if the CPU to which it was previously bound should have
2119 * its disp_max_unbound_pri increased.
2120 */
2121 void
2122 disp_adjust_unbound_pri(kthread_t *tp)
2123 {
2124 disp_t *dp;
2125 pri_t tpri;
2126
2127 ASSERT(THREAD_LOCK_HELD(tp));
2128
2129 /*
2130 * Don't do anything if the thread is not bound, or
2131 * currently not runnable or swapped out.
2132 */
2133 if (tp->t_bound_cpu == NULL ||
2134 tp->t_state != TS_RUN ||
2135 tp->t_schedflag & TS_ON_SWAPQ)
2136 return;
2137
2138 tpri = DISP_PRIO(tp);
2139 dp = tp->t_bound_cpu->cpu_disp;
2140 ASSERT(tpri >= 0 && tpri < dp->disp_npri);
2141 if (tpri > dp->disp_max_unbound_pri)
2142 dp->disp_max_unbound_pri = tpri;
2143 }
2144
2145 /*
2146 * disp_getbest()
2147 * De-queue the highest priority unbound runnable thread.
2148 * Returns with the thread unlocked and onproc but at splhigh (like disp()).
2149 * Returns NULL if nothing found.
2150 * Returns T_DONTSTEAL if the thread was not stealable,
2151 * so that the caller will try again later.
2152 *
2153 * Passed a pointer to a dispatch queue not associated with this CPU.
2154 *
2155 */
2156 static kthread_t *
2157 disp_getbest(disp_t *dp)
2158 {
2159 kthread_t *tp;
2160 dispq_t *dq;
2161 pri_t pri;
2162 cpu_t *cp, *tcp;
2163 boolean_t allbound;
2164
2165 disp_lock_enter(&dp->disp_lock);
2166
2167 /*
2168 * If there is nothing to run, or the CPU is in the middle of a
2169 * context switch of the only thread, return NULL.
2170 */
2171 tcp = dp->disp_cpu;
2172 cp = CPU;
2173 pri = dp->disp_max_unbound_pri;
2174 if (pri == -1 ||
2175 (tcp != NULL && (tcp->cpu_disp_flags & CPU_DISP_DONTSTEAL) &&
2176 tcp->cpu_disp->disp_nrunnable == 1)) {
2177 disp_lock_exit_nopreempt(&dp->disp_lock);
2178 return (NULL);
2179 }
2180
2181 dq = &dp->disp_q[pri];
2182
2183
2184 /*
2185 * Assume that all threads are bound on this queue, and change it
2186 * later when we find out that it is not the case.
2187 */
2188 allbound = B_TRUE;
2189 for (tp = dq->dq_first; tp != NULL; tp = tp->t_link) {
2190 hrtime_t now, nosteal, rqtime;
2191
2192 /*
2193 * Skip over bound threads which could be here even
2194 * though disp_max_unbound_pri indicated this level.
2195 */
2196 if (tp->t_bound_cpu || tp->t_weakbound_cpu)
2197 continue;
2198
2199 /*
2200 * We've got some unbound threads on this queue, so turn
2201 * the allbound flag off now.
2202 */
2203 allbound = B_FALSE;
2204
2205 /*
2206 * The thread is a candidate for stealing from its run queue. We
2207 * don't want to steal threads that became runnable just a
2208 * moment ago. This improves CPU affinity for threads that get
2209 * preempted for short periods of time and go back on the run
2210 * queue.
2211 *
2212 * We want to let it stay on its run queue if it was only placed
2213 * there recently and it was running on the same CPU before that
2214 * to preserve its cache investment. For the thread to remain on
2215 * its run queue, ALL of the following conditions must be
2216 * satisfied:
2217 *
2218 * - the disp queue should not be the kernel preemption queue
2219 * - delayed idle stealing should not be disabled
2220 * - nosteal_nsec should be non-zero
2221 * - it should run with user priority
2222 * - it should be on the run queue of the CPU where it was
2223 * running before being placed on the run queue
2224 * - it should be the only thread on the run queue (to prevent
2225 * extra scheduling latency for other threads)
2226 * - it should sit on the run queue for less than the per-chip
2227 * nosteal interval or the global nosteal interval
2228 * - in the case of CPUs with a shared cache, it should sit in a run
2229 * queue of a CPU from a different chip
2230 *
2231 * The checks are arranged so that the ones that are faster are
2232 * placed earlier.
2233 */
2234 if (tcp == NULL ||
2235 pri >= minclsyspri ||
2236 tp->t_cpu != tcp)
2237 break;
2238
2239 /*
2240 * Steal immediately if, due to the CMT processor architecture,
2241 * migration between cp and tcp would incur no performance
2242 * penalty.
2243 */
2244 if (pg_cmt_can_migrate(cp, tcp))
2245 break;
2246
2247 nosteal = nosteal_nsec;
2248 if (nosteal == 0)
2249 break;
2250
2251 /*
2252 * Calculate the time spent sitting on the run queue.
2253 */
2254 now = gethrtime_unscaled();
2255 rqtime = now - tp->t_waitrq;
2256 scalehrtime(&rqtime);
2257
2258 /*
2259 * Steal immediately if the time spent on this run queue is more
2260 * than the allowed nosteal delay.
2261 *
2262 * The negative rqtime check is needed here to avoid infinite
2263 * stealing delays caused by unlikely but not impossible
2264 * drifts between CPU times on different CPUs.
2265 */
2266 if (rqtime > nosteal || rqtime < 0)
2267 break;
2268
2269 DTRACE_PROBE4(nosteal, kthread_t *, tp,
2270 cpu_t *, tcp, cpu_t *, cp, hrtime_t, rqtime);
2271 scalehrtime(&now);
2272 /*
2273 * Calculate when this thread becomes stealable.
2274 */
2275 now += (nosteal - rqtime);
2276
2277 /*
2278 * Calculate the time when some thread becomes stealable.
2279 */
2280 if (now < dp->disp_steal)
2281 dp->disp_steal = now;
2282 }
2283
2284 /*
2285 * If there were no unbound threads on this queue, fix the priority
2286 * hint to reflect where they actually are before returning. The value
2287 * of disp_max_unbound_pri is not always accurate because it isn't
2288 * reduced until another idle CPU looks for work.
2289 */
2290 if (allbound)
2291 disp_fix_unbound_pri(dp, pri);
2292
2293 /*
2294 * If we reached the end of the queue and found no unbound threads
2295 * then return NULL so that other CPUs will be considered. If there
2296 * are unbound threads but they cannot yet be stolen, then
2297 * return T_DONTSTEAL and try again later.
2298 */
2299 if (tp == NULL) {
2300 disp_lock_exit_nopreempt(&dp->disp_lock);
2301 return (allbound ? NULL : T_DONTSTEAL);
2302 }
2303
2304 /*
2305 * Found a runnable, unbound thread, so remove it from queue.
2306 * dispdeq() requires that we have the thread locked, and we do,
2307 * by virtue of holding the dispatch queue lock. dispdeq() will
2308 * put the thread in transition state, thereby dropping the dispq
2309 * lock.
2310 */
2311
2312 #ifdef DEBUG
2313 {
2314 int thread_was_on_queue;
2315
2316 thread_was_on_queue = dispdeq(tp); /* drops disp_lock */
2317 ASSERT(thread_was_on_queue);
2318 }
2319
2320 #else /* DEBUG */
2321 (void) dispdeq(tp); /* drops disp_lock */
2322 #endif /* DEBUG */
2323
2324 /*
2325 * Reset the disp_queue steal time - we do not know what the smallest
2326 * value across the queue is.
2327 */
2328 dp->disp_steal = 0;
2329
2330 tp->t_schedflag |= TS_DONT_SWAP;
2331
2332 /*
2333 * Set up the thread to run on the current CPU.
2334 */
2335 tp->t_disp_queue = cp->cpu_disp;
2336
2337 cp->cpu_dispthread = tp; /* protected by spl only */
2338 cp->cpu_dispatch_pri = pri;
2339
2340 /*
2341 * There can be a memory synchronization race between disp_getbest()
2342 * and disp_ratify() vs cpu_resched() where cpu_resched() is trying
2343 * to preempt the current thread to run the enqueued thread while
2344 * disp_getbest() and disp_ratify() are changing the current thread
2345 * to the stolen thread. This may lead to a situation where
2346 * cpu_resched() tries to preempt the wrong thread and the
2347 * stolen thread continues to run on the CPU which has been tagged
2348 * for preemption.
2349 * Later the clock thread gets enqueued but doesn't get to run on the
2350 * CPU causing the system to hang.
2351 *
2352 * To avoid this, grabbing and dropping the disp_lock (which does
2353 * a memory barrier) is needed to synchronize the execution of
2354 * cpu_resched() with disp_getbest() and disp_ratify() and
2355 * synchronize the memory read and written by cpu_resched(),
2356 * disp_getbest(), and disp_ratify() with each other.
2357 * (see CR#6482861 for more details).
2358 */
2359 disp_lock_enter_high(&cp->cpu_disp->disp_lock);
2360 disp_lock_exit_high(&cp->cpu_disp->disp_lock);
2361
2362 ASSERT(pri == DISP_PRIO(tp));
2363
2364 DTRACE_PROBE3(steal, kthread_t *, tp, cpu_t *, tcp, cpu_t *, cp);
2365
2366 thread_onproc(tp, cp); /* set t_state to TS_ONPROC */
2367
2368 /*
2369 * Return with spl high so that swtch() won't need to raise it.
2370 * The disp_lock was dropped by dispdeq().
2371 */
2372
2373 return (tp);
2374 }
2375
2376 /*
2377 * disp_bound_common() - common routine for higher level functions
2378 * that check for bound threads under certain conditions.
2379 * If 'threadlistsafe' is set, then there is no need to acquire
2380 * pidlock to stop the thread list from changing (e.g., if
2381 * disp_bound_* is called with cpus paused).
2382 */
2383 static int
2384 disp_bound_common(cpu_t *cp, int threadlistsafe, int flag)
2385 {
2386 int found = 0;
2387 kthread_t *tp;
2388
2389 ASSERT(flag);
2390
2391 if (!threadlistsafe)
2392 mutex_enter(&pidlock);
2393 tp = curthread; /* faster than allthreads */
2394 do {
2395 if (tp->t_state != TS_FREE) {
2396 /*
2397 * If an interrupt thread is busy, but the
2398 * caller doesn't care (i.e. BOUND_INTR is off),
2399 * then just ignore it and continue through.
2400 */
2401 if ((tp->t_flag & T_INTR_THREAD) &&
2402 !(flag & BOUND_INTR))
2403 continue;
2404
2405 /*
2406 * Skip the idle thread for the CPU
2407 * we're about to set offline.
2408 */
2409 if (tp == cp->cpu_idle_thread)
2410 continue;
2411
2412 /*
2413 * Skip the pause thread for the CPU
2414 * we're about to set offline.
2415 */
2416 if (tp == cp->cpu_pause_thread)
2417 continue;
2418
2419 if ((flag & BOUND_CPU) &&
2420 (tp->t_bound_cpu == cp ||
2421 tp->t_bind_cpu == cp->cpu_id ||
2422 tp->t_weakbound_cpu == cp)) {
2423 found = 1;
2424 break;
2425 }
2426
2427 if ((flag & BOUND_PARTITION) &&
2428 (tp->t_cpupart == cp->cpu_part)) {
2429 found = 1;
2430 break;
2431 }
2432 }
2433 } while ((tp = tp->t_next) != curthread && found == 0);
2434 if (!threadlistsafe)
2435 mutex_exit(&pidlock);
2436 return (found);
2437 }
2438
2439 /*
2440 * disp_bound_threads - return nonzero if threads are bound to the processor.
2441 * Called infrequently. Keep this simple.
2442 * Includes threads that are asleep or stopped but not onproc.
2443 */
2444 int
2445 disp_bound_threads(cpu_t *cp, int threadlistsafe)
2446 {
2447 return (disp_bound_common(cp, threadlistsafe, BOUND_CPU));
2448 }
2449
2450 /*
2451 * disp_bound_anythreads - return nonzero if _any_ threads are bound
2452 * to the given processor, including interrupt threads.
2453 */
2454 int
2455 disp_bound_anythreads(cpu_t *cp, int threadlistsafe)
2456 {
2457 return (disp_bound_common(cp, threadlistsafe, BOUND_CPU | BOUND_INTR));
2458 }
2459
2460 /*
2461 * disp_bound_partition - return nonzero if threads are bound to the same
2462 * partition as the processor.
2463 * Called infrequently. Keep this simple.
2464 * Includes threads that are asleep or stopped but not onproc.
2465 */
2466 int
2467 disp_bound_partition(cpu_t *cp, int threadlistsafe)
2468 {
2469 return (disp_bound_common(cp, threadlistsafe, BOUND_PARTITION));
2470 }
2471
2472 /*
2473 * disp_cpu_inactive - make a CPU inactive by moving all of its unbound
2474 * threads to other CPUs.
2475 */
2476 void
2477 disp_cpu_inactive(cpu_t *cp)
2478 {
2479 kthread_t *tp;
2480 disp_t *dp = cp->cpu_disp;
2481 dispq_t *dq;
2482 pri_t pri;
2483 int wasonq;
2484
2485 disp_lock_enter(&dp->disp_lock);
2486 while ((pri = dp->disp_max_unbound_pri) != -1) {
2487 dq = &dp->disp_q[pri];
2488 tp = dq->dq_first;
2489
2490 /*
2491 * Skip over bound threads.
2492 */
2493 while (tp != NULL && tp->t_bound_cpu != NULL) {
2494 tp = tp->t_link;
2495 }
2496
2497 if (tp == NULL) {
2498 /* disp_max_unbound_pri must be inaccurate, so fix it */
2499 disp_fix_unbound_pri(dp, pri);
2500 continue;
2501 }
2502
2503 wasonq = dispdeq(tp); /* drops disp_lock */
2504 ASSERT(wasonq);
2505 ASSERT(tp->t_weakbound_cpu == NULL);
2506
2507 setbackdq(tp);
2508 /*
2509 * Called from cpu_offline:
2510 *
2511 * cp has already been removed from the list of active cpus
2512 * and tp->t_cpu has been changed so there is no risk of
2513 * tp ending up back on cp.
2514 *
2515 * Called from cpupart_move_cpu:
2516 *
2517 * The cpu has moved to a new cpupart. Any threads that
2518 * were on its dispatch queues before the move remain
2519 * in the old partition and can't run in the new partition.
2520 */
2521 ASSERT(tp->t_cpu != cp);
2522 thread_unlock(tp);
2523
2524 disp_lock_enter(&dp->disp_lock);
2525 }
2526 disp_lock_exit(&dp->disp_lock);
2527 }
2528
2529 /*
2530 * disp_lowpri_cpu - find the CPU running the lowest priority thread.
2531 * The hint passed in is used as a starting point so we don't favor
2532 * CPU 0 or any other CPU. The caller should pass in the most recently
2533 * used CPU for the thread.
2534 *
2535 * The lgroup and priority are used to determine the best CPU to run on
2536 * in a NUMA machine. The lgroup specifies which CPUs are closest, while
2537 * the thread priority will indicate whether the thread will actually run
2538 * there. To pick the best CPU, the CPUs inside and outside of the given
2539 * lgroup which are running the lowest priority threads are found. The
2540 * remote CPU is chosen only if the thread will not run locally on a CPU
2541 * within the lgroup, but will run on the remote CPU. If the thread
2542 * cannot immediately run on any CPU, the best local CPU will be chosen.
2543 *
2544 * The lpl specified also identifies the cpu partition from which
2545 * disp_lowpri_cpu should select a CPU.
2546 *
2547 * curcpu is used to indicate that disp_lowpri_cpu is being called on
2548 * behalf of the current thread (curthread is looking for a new cpu).
2549 * In this case, cpu_dispatch_pri for this thread's cpu should be
2550 * ignored.
2551 *
2552 * If a cpu is the target of an offline request, then try to avoid it.
2553 *
2554 * This function must be called at either high SPL, or with preemption
2555 * disabled, so that the "hint" CPU cannot be removed from the online
2556 * CPU list while we are traversing it.
2557 */
2558 cpu_t *
2559 disp_lowpri_cpu(cpu_t *hint, lpl_t *lpl, pri_t tpri, cpu_t *curcpu)
2560 {
2561 cpu_t *bestcpu;
2562 cpu_t *besthomecpu;
2563 cpu_t *cp, *cpstart;
2564
2565 pri_t bestpri;
2566 pri_t cpupri;
2567
2568 klgrpset_t done;
2569 klgrpset_t cur_set;
2570
2571 lpl_t *lpl_iter, *lpl_leaf;
2572 int i;
2573
2574 /*
2575 * Scan for a CPU currently running the lowest priority thread.
2576 * We cannot get cpu_lock here because it is adaptive.
2577 * We do not require a lock on the CPU list.
2578 */
2579 ASSERT(hint != NULL);
2580 ASSERT(lpl != NULL);
2581 ASSERT(lpl->lpl_ncpu > 0);
2582
2583 /*
2584 * First examine local CPUs. Note that it's possible the hint CPU
2585 * passed in is remote to the specified home lgroup. If our priority
2586 * isn't high enough for us to run immediately at home,
2587 * then examine CPUs remote to our home lgroup.
2588 * We would like to give preference to CPUs closest to "home".
2589 * If we can't find a CPU where we'll run at a given level
2590 * of locality, we expand our search to include the next level.
2591 */
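/*
 * In the loop below, a candidate CPU's effective priority is the
 * highest of cpu_dispatch_pri (what is running now; ignored when
 * cp == curcpu), disp_maxrunpri (the best priority waiting on its
 * queue), and cpu_chosen_level (a priority for which this CPU has
 * already been chosen). A CPU that appears idle is returned right
 * away; otherwise the thread can run immediately on the best CPU
 * only if tpri exceeds that CPU's effective priority.
 */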
2592 bestcpu = besthomecpu = NULL;
2593 klgrpset_clear(done);
2594 /* start with the lpl we were passed */
2595
2596 lpl_iter = lpl;
2597
2598 do {
2599
2600 bestpri = SHRT_MAX;
2601 klgrpset_clear(cur_set);
2602
2603 for (i = 0; i < lpl_iter->lpl_nrset; i++) {
2604 lpl_leaf = lpl_iter->lpl_rset[i];
2605 if (klgrpset_ismember(done, lpl_leaf->lpl_lgrpid))
2606 continue;
2607
2608 klgrpset_add(cur_set, lpl_leaf->lpl_lgrpid);
2609
2610 if (hint->cpu_lpl == lpl_leaf)
2611 cp = cpstart = hint;
2612 else
2613 cp = cpstart = lpl_leaf->lpl_cpus;
2614
2615 do {
2616 if (cp == curcpu)
2617 cpupri = -1;
2618 else if (cp == cpu_inmotion)
2619 cpupri = SHRT_MAX;
2620 else
2621 cpupri = cp->cpu_dispatch_pri;
2622 if (cp->cpu_disp->disp_maxrunpri > cpupri)
2623 cpupri = cp->cpu_disp->disp_maxrunpri;
2624 if (cp->cpu_chosen_level > cpupri)
2625 cpupri = cp->cpu_chosen_level;
2626 if (cpupri < bestpri) {
2627 if (CPU_IDLING(cpupri)) {
2628 ASSERT((cp->cpu_flags &
2629 CPU_QUIESCED) == 0);
2630 return (cp);
2631 }
2632 bestcpu = cp;
2633 bestpri = cpupri;
2634 }
2635 } while ((cp = cp->cpu_next_lpl) != cpstart);
2636 }
2637
2638 if (bestcpu && (tpri > bestpri)) {
2639 ASSERT((bestcpu->cpu_flags & CPU_QUIESCED) == 0);
2640 return (bestcpu);
2641 }
2642 if (besthomecpu == NULL)
2643 besthomecpu = bestcpu;
2644 /*
2645 * Add the lgrps we just considered to the "done" set.
2646 */
2647 klgrpset_or(done, cur_set);
2648
2649 } while ((lpl_iter = lpl_iter->lpl_parent) != NULL);
2650
2651 /*
2652 * The specified priority isn't high enough to run immediately
2653 * anywhere, so just return the best CPU from the home lgroup.
2654 */
2655 ASSERT((besthomecpu->cpu_flags & CPU_QUIESCED) == 0);
2656 return (besthomecpu);
2657 }
2658
2659 /*
2660 * This routine provides the generic idle cpu function for all processors.
2661 * If a processor has some specific code to execute when idle (say, to stop
2662 * the pipeline and save power), then that routine should be defined in the
2663 * processor's specific code (module_xx.c) and the global variable idle_cpu
2664 * set to that function.
2665 */
2666 static void
2667 generic_idle_cpu(void)
2668 {
2669 }
2670
2671 /*ARGSUSED*/
2672 static void
2673 generic_enq_thread(cpu_t *cpu, int bound)
2674 {
2675 }
2676
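/*
 * Illustrative sketch (not part of this file): a platform CPU module
 * could override the idle and enqueue hooks (idle_cpu and
 * disp_enq_thread) roughly as follows. The names my_mach_idle_cpu(),
 * my_mach_enq_thread() and my_mach_disp_init() are hypothetical.
 *
 *	static void
 *	my_mach_idle_cpu(void)
 *	{
 *		(wait in a low-power state until the next interrupt)
 *	}
 *
 *	static void
 *	my_mach_enq_thread(cpu_t *cp, int bound)
 *	{
 *		(wake cp from its low-power state if necessary)
 *	}
 *
 *	void
 *	my_mach_disp_init(void)
 *	{
 *		idle_cpu = my_mach_idle_cpu;
 *		disp_enq_thread = my_mach_enq_thread;
 *	}
 */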