/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/var.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/tnf.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/bitset.h>
#include <sys/schedctl.h>
#include <sys/atomic.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>
#include <sys/archsystm.h>

#include <vm/as.h>

#define	BOUND_CPU	0x1
#define	BOUND_PARTITION	0x2
#define	BOUND_INTR	0x4

/* Dispatch queue allocation structure and functions */
struct disp_queue_info {
	disp_t	*dp;
	dispq_t	*olddispq;
	dispq_t	*newdispq;
	ulong_t	*olddqactmap;
	ulong_t	*newdqactmap;
	int	oldnglobpris;
};
static void	disp_dq_alloc(struct disp_queue_info *dptr, int numpris,
    disp_t *dp);
static void	disp_dq_assign(struct disp_queue_info *dptr, int numpris);
static void	disp_dq_free(struct disp_queue_info *dptr);

/* platform-specific routine to call when processor is idle */
static void	generic_idle_cpu();
void		(*idle_cpu)() = generic_idle_cpu;

/* routines invoked when a CPU enters/exits the idle loop */
static void	idle_enter();
static void	idle_exit();

/* platform-specific routine to call when thread is enqueued */
static void	generic_enq_thread(cpu_t *, int);
void		(*disp_enq_thread)(cpu_t *, int) = generic_enq_thread;

pri_t	kpreemptpri;		/* priority where kernel preemption applies */
pri_t	upreemptpri = 0;	/* priority where normal preemption applies */
pri_t	intr_pri;		/* interrupt thread priority base level */

#define	KPQPRI	-1		/* pri where cpu affinity is dropped for kpq */
pri_t	kpqpri = KPQPRI;	/* can be set in /etc/system */
disp_t	cpu0_disp;		/* boot CPU's dispatch queue */
disp_lock_t	swapped_lock;	/* lock swapped threads and swap queue */
int	nswapped;		/* total number of swapped threads */
void	disp_swapped_enq(kthread_t *tp);
static void	disp_swapped_setrun(kthread_t *tp);
static void	cpu_resched(cpu_t *cp, pri_t tpri);

/*
 * If this is set, only interrupt threads will cause kernel preemptions.
 * This is done by changing the value of kpreemptpri.  kpreemptpri
 * will either be the max sysclass pri + 1 or the min interrupt pri.
 */
int	only_intr_kpreempt;

extern void set_idle_cpu(int cpun);
extern void unset_idle_cpu(int cpun);
static void setkpdq(kthread_t *tp, int borf);
#define	SETKP_BACK	0
#define	SETKP_FRONT	1
/*
 * Parameter that determines how recently a thread must have run
 * on the CPU to be considered loosely-bound to that CPU to reduce
 * cold cache effects.  The interval is in hertz.
 */
#define	RECHOOSE_INTERVAL	3
int	rechoose_interval = RECHOOSE_INTERVAL;
static cpu_t	*cpu_choose(kthread_t *, pri_t);

/*
 * Parameter that determines how long (in nanoseconds) a thread must
 * be sitting on a run queue before it can be stolen by another CPU
 * to reduce migrations.
 *
 * nosteal_nsec should be set by the platform code in
 * cmp_set_nosteal_interval() to an appropriate value.  It is initialized
 * to NOSTEAL_UNINITIALIZED here to indicate that it has not been set yet.
 * Setting nosteal_nsec to 0 effectively disables the nosteal 'protection'.
 */
#define	NOSTEAL_UNINITIALIZED	(-1)
hrtime_t nosteal_nsec = NOSTEAL_UNINITIALIZED;
extern void cmp_set_nosteal_interval(void);

id_t	defaultcid;	/* system "default" class; see dispadmin(1M) */

disp_lock_t	transition_lock;	/* lock on transitioning threads */
disp_lock_t	stop_lock;		/* lock on stopped threads */

static void	cpu_dispqalloc(int numpris);

/*
 * This gets returned by disp_getwork/disp_getbest if we couldn't steal
 * a thread because it was sitting on its run queue for a very short
 * period of time.
 */
#define	T_DONTSTEAL	(kthread_t *)(-1) /* returned by disp_getwork/getbest */

static kthread_t	*disp_getwork(cpu_t *to);
static kthread_t	*disp_getbest(disp_t *from);
static kthread_t	*disp_ratify(kthread_t *tp, disp_t *kpq);

void	swtch_to(kthread_t *);

/*
 * dispatcher and scheduler initialization
 */

/*
 * disp_setup - Common code to calculate and allocate dispatcher
 *		variables and structures based on the maximum priority.
 */
static void
disp_setup(pri_t maxglobpri, pri_t oldnglobpris)
{
	pri_t	newnglobpris;

	ASSERT(MUTEX_HELD(&cpu_lock));

	newnglobpris = maxglobpri + 1 + LOCK_LEVEL;

	if (newnglobpris > oldnglobpris) {
		/*
		 * Allocate new kp queues for each CPU partition.
		 */
		cpupart_kpqalloc(newnglobpris);

		/*
		 * Allocate new dispatch queues for each CPU.
		 */
		cpu_dispqalloc(newnglobpris);

		/*
		 * compute new interrupt thread base priority
		 */
		intr_pri = maxglobpri;
		if (only_intr_kpreempt) {
			kpreemptpri = intr_pri + 1;
			if (kpqpri == KPQPRI)
				kpqpri = kpreemptpri;
		}
		v.v_nglobpris = newnglobpris;
	}
}

/*
 * dispinit - Called to initialize all loaded classes and the
 *	      dispatcher framework.
 */
void
dispinit(void)
{
	id_t	cid;
	pri_t	maxglobpri;
	pri_t	cl_maxglobpri;

	maxglobpri = -1;

	/*
	 * Initialize transition lock, which will always be set.
	 */
	DISP_LOCK_INIT(&transition_lock);
	disp_lock_enter_high(&transition_lock);
	DISP_LOCK_INIT(&stop_lock);

	mutex_enter(&cpu_lock);
	CPU->cpu_disp->disp_maxrunpri = -1;
	CPU->cpu_disp->disp_max_unbound_pri = -1;

	/*
	 * Initialize the default CPU partition.
	 */
	cpupart_initialize_default();
	/*
	 * Call the class specific initialization functions for
	 * all pre-installed schedulers.
	 *
	 * We pass the size of a class specific parameter
	 * buffer to each of the initialization functions
	 * to try to catch problems with backward compatibility
	 * of class modules.
	 *
	 * For example a new class module running on an old system
	 * which didn't provide sufficiently large parameter buffers
	 * would be bad news. Class initialization modules can check for
	 * this and take action if they detect a problem.
	 */

	for (cid = 0; cid < nclass; cid++) {
		sclass_t *sc;

		sc = &sclass[cid];
		if (SCHED_INSTALLED(sc)) {
			cl_maxglobpri = sc->cl_init(cid, PC_CLPARMSZ,
			    &sc->cl_funcs);
			if (cl_maxglobpri > maxglobpri)
				maxglobpri = cl_maxglobpri;
		}
	}
	kpreemptpri = (pri_t)v.v_maxsyspri + 1;
	if (kpqpri == KPQPRI)
		kpqpri = kpreemptpri;

	ASSERT(maxglobpri >= 0);
	disp_setup(maxglobpri, 0);

	mutex_exit(&cpu_lock);

	/*
	 * Platform specific sticky scheduler setup.
	 */
	if (nosteal_nsec == NOSTEAL_UNINITIALIZED)
		cmp_set_nosteal_interval();

	/*
	 * Get the default class ID; this may be later modified via
	 * dispadmin(1M).  This will load the class (normally TS) and that will
	 * call disp_add(), which is why we had to drop cpu_lock first.
	 */
	if (getcid(defaultclass, &defaultcid) != 0) {
		cmn_err(CE_PANIC, "Couldn't load default scheduling class '%s'",
		    defaultclass);
	}
}

/*
 * disp_add - Called with class pointer to initialize the dispatcher
 *	      for a newly loaded class.
 */
void
disp_add(sclass_t *clp)
{
	pri_t	maxglobpri;
	pri_t	cl_maxglobpri;

	mutex_enter(&cpu_lock);
	/*
	 * Initialize the scheduler class.
	 */
	maxglobpri = (pri_t)(v.v_nglobpris - LOCK_LEVEL - 1);
	cl_maxglobpri = clp->cl_init(clp - sclass, PC_CLPARMSZ, &clp->cl_funcs);
	if (cl_maxglobpri > maxglobpri)
		maxglobpri = cl_maxglobpri;

	/*
	 * Save old queue information.  Since we're initializing a
	 * new scheduling class which has just been loaded, the size
	 * of the dispq may have changed.  We need to handle that here.
	 */
	disp_setup(maxglobpri, v.v_nglobpris);

	mutex_exit(&cpu_lock);
}


/*
 * For each CPU, allocate new dispatch queues
 * with the stated number of priorities.
 */
static void
cpu_dispqalloc(int numpris)
{
	cpu_t	*cpup;
	struct disp_queue_info	*disp_mem;
	int i, num;

	ASSERT(MUTEX_HELD(&cpu_lock));

	disp_mem = kmem_zalloc(NCPU *
	    sizeof (struct disp_queue_info), KM_SLEEP);

	/*
	 * This routine must allocate all of the memory before stopping
	 * the CPUs because it must not sleep in kmem_alloc while the
	 * CPUs are stopped.  Locks they hold will not be freed until they
	 * are restarted.
	 */
	i = 0;
	cpup = cpu_list;
	do {
		disp_dq_alloc(&disp_mem[i], numpris, cpup->cpu_disp);
		i++;
		cpup = cpup->cpu_next;
	} while (cpup != cpu_list);
	num = i;

	pause_cpus(NULL);
	for (i = 0; i < num; i++)
		disp_dq_assign(&disp_mem[i], numpris);
	start_cpus();

	/*
	 * All of the memory must be freed after starting the CPUs because
	 * we cannot risk sleeping in kmem_free while the CPUs are stopped.
	 */
	for (i = 0; i < num; i++)
		disp_dq_free(&disp_mem[i]);

	kmem_free(disp_mem, NCPU * sizeof (struct disp_queue_info));
}

static void
disp_dq_alloc(struct disp_queue_info *dptr, int numpris, disp_t *dp)
{
	dptr->newdispq = kmem_zalloc(numpris * sizeof (dispq_t), KM_SLEEP);
	dptr->newdqactmap = kmem_zalloc(((numpris / BT_NBIPUL) + 1) *
	    sizeof (long), KM_SLEEP);
	dptr->dp = dp;
}

static void
disp_dq_assign(struct disp_queue_info *dptr, int numpris)
{
	disp_t	*dp;

	dp = dptr->dp;
	dptr->olddispq = dp->disp_q;
	dptr->olddqactmap = dp->disp_qactmap;
	dptr->oldnglobpris = dp->disp_npri;

	ASSERT(dptr->oldnglobpris < numpris);

	if (dptr->olddispq != NULL) {
		/*
		 * Use kcopy because bcopy is platform-specific
		 * and could block while we might have paused the cpus.
		 */
		(void) kcopy(dptr->olddispq, dptr->newdispq,
		    dptr->oldnglobpris * sizeof (dispq_t));
		(void) kcopy(dptr->olddqactmap, dptr->newdqactmap,
		    ((dptr->oldnglobpris / BT_NBIPUL) + 1) *
		    sizeof (long));
	}
	dp->disp_q = dptr->newdispq;
	dp->disp_qactmap = dptr->newdqactmap;
	dp->disp_q_limit = &dptr->newdispq[numpris];
	dp->disp_npri = numpris;
}

static void
disp_dq_free(struct disp_queue_info *dptr)
{
	if (dptr->olddispq != NULL)
		kmem_free(dptr->olddispq,
		    dptr->oldnglobpris * sizeof (dispq_t));
	if (dptr->olddqactmap != NULL)
		kmem_free(dptr->olddqactmap,
		    ((dptr->oldnglobpris / BT_NBIPUL) + 1) * sizeof (long));
}

/*
 * For a newly created CPU, initialize the dispatch queue.
 * This is called before the CPU is known through cpu[] or on any lists.
 */
void
disp_cpu_init(cpu_t *cp)
{
	disp_t	*dp;
	dispq_t	*newdispq;
	ulong_t	*newdqactmap;

	ASSERT(MUTEX_HELD(&cpu_lock));	/* protect dispatcher queue sizes */

	if (cp == cpu0_disp.disp_cpu)
		dp = &cpu0_disp;
	else
		dp = kmem_alloc(sizeof (disp_t), KM_SLEEP);
	bzero(dp, sizeof (disp_t));
	cp->cpu_disp = dp;
	dp->disp_cpu = cp;
	dp->disp_maxrunpri = -1;
	dp->disp_max_unbound_pri = -1;
	DISP_LOCK_INIT(&cp->cpu_thread_lock);
	/*
	 * Allocate memory for the dispatcher queue headers
	 * and the active queue bitmap.
	 */
	newdispq = kmem_zalloc(v.v_nglobpris * sizeof (dispq_t), KM_SLEEP);
	newdqactmap = kmem_zalloc(((v.v_nglobpris / BT_NBIPUL) + 1) *
	    sizeof (long), KM_SLEEP);
	dp->disp_q = newdispq;
	dp->disp_qactmap = newdqactmap;
	dp->disp_q_limit = &newdispq[v.v_nglobpris];
	dp->disp_npri = v.v_nglobpris;
}

void
disp_cpu_fini(cpu_t *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	disp_kp_free(cp->cpu_disp);
	if (cp->cpu_disp != &cpu0_disp)
		kmem_free(cp->cpu_disp, sizeof (disp_t));
}

/*
 * Allocate new, larger kpreempt dispatch queue to replace the old one.
 */
void
disp_kp_alloc(disp_t *dq, pri_t npri)
{
	struct disp_queue_info	mem_info;

	if (npri > dq->disp_npri) {
		/*
		 * Allocate memory for the new array.
		 */
		disp_dq_alloc(&mem_info, npri, dq);

		/*
		 * We need to copy the old structures to the new
		 * and free the old.
		 */
		disp_dq_assign(&mem_info, npri);
		disp_dq_free(&mem_info);
	}
}

/*
 * Free dispatch queue.
 * Used for the kpreempt queues for a removed CPU partition and
 * for the per-CPU queues of deleted CPUs.
 */
void
disp_kp_free(disp_t *dq)
{
	struct disp_queue_info	mem_info;

	mem_info.olddispq = dq->disp_q;
	mem_info.olddqactmap = dq->disp_qactmap;
	mem_info.oldnglobpris = dq->disp_npri;
	disp_dq_free(&mem_info);
}

/*
 * End dispatcher and scheduler initialization.
 */

/*
 * See if there's anything to do other than remain idle.
 * Return non-zero if there is.
 *
 * This function must be called with high spl, or with
 * kernel preemption disabled to prevent the partition's
 * active cpu list from changing while being traversed.
 */
int
disp_anywork(void)
{
	cpu_t	*cp = CPU;
	cpu_t	*ocp;

	if (cp->cpu_disp->disp_nrunnable != 0)
		return (1);

	if (!(cp->cpu_flags & CPU_OFFLINE)) {
		if (CP_MAXRUNPRI(cp->cpu_part) >= 0)
			return (1);

		/*
		 * Work can be taken from another CPU if:
		 *	- There is unbound work on the run queue
		 *	- That work isn't a thread undergoing a
		 *	  context switch on an otherwise empty queue
		 *	- The CPU isn't running the idle loop.
		 */
		for (ocp = cp->cpu_next_part; ocp != cp;
		    ocp = ocp->cpu_next_part) {
			ASSERT(CPU_ACTIVE(ocp));

			if (ocp->cpu_disp->disp_max_unbound_pri != -1 &&
			    !((ocp->cpu_disp_flags & CPU_DISP_DONTSTEAL) &&
			    ocp->cpu_disp->disp_nrunnable == 1) &&
			    ocp->cpu_dispatch_pri != -1)
				return (1);
		}
	}
	return (0);
}

/*
 * Called when CPU enters the idle loop
 */
static void
idle_enter()
{
	cpu_t		*cp = CPU;

	new_cpu_mstate(CMS_IDLE, gethrtime_unscaled());
	CPU_STATS_ADDQ(cp, sys, idlethread, 1);
	set_idle_cpu(cp->cpu_id);	/* arch-dependent hook */
}

/*
 * Called when CPU exits the idle loop
 */
static void
idle_exit()
{
	cpu_t		*cp = CPU;

	new_cpu_mstate(CMS_SYSTEM, gethrtime_unscaled());
	unset_idle_cpu(cp->cpu_id);	/* arch-dependent hook */
}

/*
 * Idle loop.
 */
void
idle()
{
	struct cpu	*cp = CPU;		/* pointer to this CPU */
	kthread_t	*t;			/* taken thread */

	idle_enter();

	/*
	 * Uniprocessor version of idle loop.
	 * Do this until notified that we're on an actual multiprocessor.
	 */
	while (ncpus == 1) {
		if (cp->cpu_disp->disp_nrunnable == 0) {
			(*idle_cpu)();
			continue;
		}
		idle_exit();
		swtch();

		idle_enter();		/* returned from swtch */
	}

	/*
	 * Multiprocessor idle loop.
	 */
	for (;;) {
		/*
		 * If CPU is completely quiesced by p_online(2), just wait
		 * here with minimal bus traffic until put online.
		 */
		while (cp->cpu_flags & CPU_QUIESCED)
			(*idle_cpu)();

		if (cp->cpu_disp->disp_nrunnable != 0) {
			idle_exit();
			swtch();
		} else {
			if (cp->cpu_flags & CPU_OFFLINE)
				continue;
			if ((t = disp_getwork(cp)) == NULL) {
				if (cp->cpu_chosen_level != -1) {
					disp_t *dp = cp->cpu_disp;
					disp_t *kpq;

					disp_lock_enter(&dp->disp_lock);
					/*
					 * Set kpq under lock to prevent
					 * migration between partitions.
					 */
					kpq = &cp->cpu_part->cp_kp_queue;
					if (kpq->disp_maxrunpri == -1)
						cp->cpu_chosen_level = -1;
					disp_lock_exit(&dp->disp_lock);
				}
				(*idle_cpu)();
				continue;
			}
			/*
			 * If there was a thread but we couldn't steal
			 * it, then keep trying.
			 */
			if (t == T_DONTSTEAL)
				continue;
			idle_exit();
			swtch_to(t);
		}
		idle_enter();		/* returned from swtch/swtch_to */
	}
}


/*
 * Preempt the currently running thread in favor of the highest
 * priority thread.  The class of the current thread controls
 * where it goes on the dispatcher queues.  If panicking, turn
 * preemption off.
 */
void
preempt()
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(curthread);

	if (panicstr)
		return;

	TRACE_0(TR_FAC_DISP, TR_PREEMPT_START, "preempt_start");

	thread_lock(t);

	if (t->t_state != TS_ONPROC || t->t_disp_queue != CPU->cpu_disp) {
		/*
		 * this thread has already been chosen to be run on
		 * another CPU.  Clear kprunrun on this CPU since we're
		 * already headed for swtch().
		 */
		CPU->cpu_kprunrun = 0;
		thread_unlock_nopreempt(t);
		TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end");
	} else {
		if (lwp != NULL)
			lwp->lwp_ru.nivcsw++;
		CPU_STATS_ADDQ(CPU, sys, inv_swtch, 1);
		THREAD_TRANSITION(t);
		CL_PREEMPT(t);
		DTRACE_SCHED(preempt);
		thread_unlock_nopreempt(t);

		TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end");

		swtch();		/* clears CPU->cpu_runrun via disp() */
	}
}

extern kthread_t *thread_unpin();

/*
 * disp() - find the highest priority thread for this processor to run, and
 * set it in TS_ONPROC state so that resume() can be called to run it.
 */
static kthread_t *
disp()
{
	cpu_t		*cpup;
	disp_t		*dp;
	kthread_t	*tp;
	dispq_t		*dq;
	int		maxrunword;
	pri_t		pri;
	disp_t		*kpq;

	TRACE_0(TR_FAC_DISP, TR_DISP_START, "disp_start");

	cpup = CPU;
	/*
	 * Find the highest priority loaded, runnable thread.
	 */
	dp = cpup->cpu_disp;

reschedule:
	/*
	 * If there is more important work on the global queue with a better
	 * priority than the maximum on this CPU, take it now.
	 */
	kpq = &cpup->cpu_part->cp_kp_queue;
	while ((pri = kpq->disp_maxrunpri) >= 0 &&
	    pri >= dp->disp_maxrunpri &&
	    (cpup->cpu_flags & CPU_OFFLINE) == 0 &&
	    (tp = disp_getbest(kpq)) != NULL) {
		if (disp_ratify(tp, kpq) != NULL) {
			TRACE_1(TR_FAC_DISP, TR_DISP_END,
			    "disp_end:tid %p", tp);
			return (tp);
		}
	}

	disp_lock_enter(&dp->disp_lock);
	pri = dp->disp_maxrunpri;

	/*
	 * If there is nothing to run, look at what's runnable on other queues.
	 * Choose the idle thread if the CPU is quiesced.
	 * Note that CPUs that have the CPU_OFFLINE flag set can still run
	 * interrupt threads, which will be the only threads on the CPU's own
	 * queue, but cannot run threads from other queues.
	 */
	if (pri == -1) {
		if (!(cpup->cpu_flags & CPU_OFFLINE)) {
			disp_lock_exit(&dp->disp_lock);
			if ((tp = disp_getwork(cpup)) == NULL ||
			    tp == T_DONTSTEAL) {
				tp = cpup->cpu_idle_thread;
				(void) splhigh();
				THREAD_ONPROC(tp, cpup);
				cpup->cpu_dispthread = tp;
				cpup->cpu_dispatch_pri = -1;
				cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
				cpup->cpu_chosen_level = -1;
			}
		} else {
			disp_lock_exit_high(&dp->disp_lock);
			tp = cpup->cpu_idle_thread;
			THREAD_ONPROC(tp, cpup);
			cpup->cpu_dispthread = tp;
			cpup->cpu_dispatch_pri = -1;
			cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
			cpup->cpu_chosen_level = -1;
		}
		TRACE_1(TR_FAC_DISP, TR_DISP_END,
		    "disp_end:tid %p", tp);
		return (tp);
	}

	dq = &dp->disp_q[pri];
	tp = dq->dq_first;

	ASSERT(tp != NULL);
	ASSERT(tp->t_schedflag & TS_LOAD);	/* thread must be swapped in */

	DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp);

	/*
	 * Found it so remove it from queue.
	 */
	dp->disp_nrunnable--;
	dq->dq_sruncnt--;
	if ((dq->dq_first = tp->t_link) == NULL) {
		ulong_t	*dqactmap = dp->disp_qactmap;

		ASSERT(dq->dq_sruncnt == 0);
		dq->dq_last = NULL;

		/*
		 * The queue is empty, so the corresponding bit needs to be
		 * turned off in dqactmap.  If nrunnable != 0, we just took
		 * the last runnable thread off the highest queue, so
		 * recompute disp_maxrunpri.
		 */
		maxrunword = pri >> BT_ULSHIFT;
		dqactmap[maxrunword] &= ~BT_BIW(pri);

		if (dp->disp_nrunnable == 0) {
			dp->disp_max_unbound_pri = -1;
			dp->disp_maxrunpri = -1;
		} else {
			int ipri;

			ipri = bt_gethighbit(dqactmap, maxrunword);
			dp->disp_maxrunpri = ipri;
			if (ipri < dp->disp_max_unbound_pri)
				dp->disp_max_unbound_pri = ipri;
		}
	} else {
		tp->t_link = NULL;
	}

	/*
	 * Set TS_DONT_SWAP flag to prevent another processor from swapping
	 * out this thread before we have a chance to run it.
	 * While running, it is protected against swapping by t_lock.
	 */
	tp->t_schedflag |= TS_DONT_SWAP;
	cpup->cpu_dispthread = tp;		/* protected by spl only */
	cpup->cpu_dispatch_pri = pri;
	ASSERT(pri == DISP_PRIO(tp));
	thread_onproc(tp, cpup);		/* set t_state to TS_ONPROC */
	disp_lock_exit_high(&dp->disp_lock);	/* drop run queue lock */

	ASSERT(tp != NULL);
	TRACE_1(TR_FAC_DISP, TR_DISP_END,
	    "disp_end:tid %p", tp);

	if (disp_ratify(tp, kpq) == NULL)
		goto reschedule;

	return (tp);
}

/*
 * swtch()
 *	Find best runnable thread and run it.
 *	Called with the current thread already switched to a new state,
 *	on a sleep queue, run queue, stopped, and not zombied.
 *	May be called at any spl level less than or equal to LOCK_LEVEL.
 *	Always drops spl to the base level (spl0()).
 */
void
swtch()
{
	kthread_t	*t = curthread;
	kthread_t	*next;
	cpu_t		*cp;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	if (t->t_flag & T_INTR_THREAD)
		cpu_intr_swtch_enter(t);

	if (t->t_intr != NULL) {
		/*
		 * We are an interrupt thread.  Setup and return
		 * the interrupted thread to be resumed.
		 */
		(void) splhigh();	/* block other scheduler action */
		cp = CPU;		/* now protected against migration */
		ASSERT(CPU_ON_INTR(cp) == 0);	/* not called with PIL > 10 */
		CPU_STATS_ADDQ(cp, sys, pswitch, 1);
		CPU_STATS_ADDQ(cp, sys, intrblk, 1);
		next = thread_unpin();
		TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");
		resume_from_intr(next);
	} else {
#ifdef	DEBUG
		if (t->t_state == TS_ONPROC &&
		    t->t_disp_queue->disp_cpu == CPU &&
		    t->t_preempt == 0) {
			thread_lock(t);
			ASSERT(t->t_state != TS_ONPROC ||
			    t->t_disp_queue->disp_cpu != CPU ||
			    t->t_preempt != 0);	/* cannot migrate */
			thread_unlock_nopreempt(t);
		}
#endif	/* DEBUG */
		cp = CPU;
		next = disp();		/* returns with spl high */
		ASSERT(CPU_ON_INTR(cp) == 0);	/* not called with PIL > 10 */

		/* OK to steal anything left on run queue */
		cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL;

		if (next != t) {
			if (t == cp->cpu_idle_thread) {
				PG_NRUN_UPDATE(cp, 1);
			} else if (next == cp->cpu_idle_thread) {
				PG_NRUN_UPDATE(cp, -1);
			}

			/*
			 * If t was previously in the TS_ONPROC state,
			 * setfrontdq and setbackdq won't have set its t_waitrq.
			 * Since we now finally know that we're switching away
			 * from this thread, set its t_waitrq if it is on a run
			 * queue.
			 */
			if ((t->t_state == TS_RUN) && (t->t_waitrq == 0)) {
				t->t_waitrq = gethrtime_unscaled();
			}

			/*
			 * restore mstate of thread that we are switching to
			 */
			restore_mstate(next);

			CPU_STATS_ADDQ(cp, sys, pswitch, 1);
			cp->cpu_last_swtch = t->t_disp_time = lbolt;
			TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

			if (dtrace_vtime_active)
				dtrace_vtime_switch(next);

			resume(next);
			/*
			 * The TR_RESUME_END and TR_SWTCH_END trace points
			 * appear at the end of resume(), because we may not
			 * return here
			 */
		} else {
			if (t->t_flag & T_INTR_THREAD)
				cpu_intr_swtch_exit(t);

			DTRACE_SCHED(remain__cpu);
			TRACE_0(TR_FAC_DISP, TR_SWTCH_END, "swtch_end");
			(void) spl0();
		}
	}
}

/*
 * swtch_from_zombie()
 *	Special case of swtch(), which allows checks for TS_ZOMB to be
 *	eliminated from normal resume.
 *	Find best runnable thread and run it.
 *	Called with the current thread zombied.
 *	Zombies cannot migrate, so CPU references are safe.
 */
void
swtch_from_zombie()
{
	kthread_t	*next;
	cpu_t		*cpu = CPU;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	ASSERT(curthread->t_state == TS_ZOMB);

	next = disp();			/* returns with spl high */
	ASSERT(CPU_ON_INTR(CPU) == 0);	/* not called with PIL > 10 */
	CPU_STATS_ADDQ(CPU, sys, pswitch, 1);
	ASSERT(next != curthread);
	TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

	if (next == cpu->cpu_idle_thread)
		PG_NRUN_UPDATE(cpu, -1);

	restore_mstate(next);

	if (dtrace_vtime_active)
		dtrace_vtime_switch(next);

	resume_from_zombie(next);
	/*
	 * The TR_RESUME_END and TR_SWTCH_END trace points
	 * appear at the end of resume(), because we certainly will not
	 * return here
	 */
}

#if defined(DEBUG) && (defined(DISP_DEBUG) || defined(lint))

/*
 * search_disp_queues()
 *	Search the given dispatch queues for thread tp.
 *	Return 1 if tp is found, otherwise return 0.
 */
static int
search_disp_queues(disp_t *dp, kthread_t *tp)
{
	dispq_t	*dq;
	dispq_t	*eq;

	disp_lock_enter_high(&dp->disp_lock);

	for (dq = dp->disp_q, eq = dp->disp_q_limit; dq < eq; ++dq) {
		kthread_t	*rp;

		ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL);

		for (rp = dq->dq_first; rp; rp = rp->t_link)
			if (tp == rp) {
				disp_lock_exit_high(&dp->disp_lock);
				return (1);
			}
	}
	disp_lock_exit_high(&dp->disp_lock);

	return (0);
}

/*
 * thread_on_queue()
 *	Search all per-CPU dispatch queues and all partition-wide kpreempt
 *	queues for thread tp. Return 1 if tp is found, otherwise return 0.
 */
static int
thread_on_queue(kthread_t *tp)
{
	cpu_t		*cp;
	struct cpupart	*part;

	ASSERT(getpil() >= DISP_LEVEL);

	/*
	 * Search the per-CPU dispatch queues for tp.
	 */
	cp = CPU;
	do {
		if (search_disp_queues(cp->cpu_disp, tp))
			return (1);
	} while ((cp = cp->cpu_next_onln) != CPU);

	/*
	 * Search the partition-wide kpreempt queues for tp.
	 */
	part = CPU->cpu_part;
	do {
		if (search_disp_queues(&part->cp_kp_queue, tp))
			return (1);
	} while ((part = part->cp_next) != CPU->cpu_part);

	return (0);
}

#else

#define	thread_on_queue(tp)	0	/* ASSERT must be !thread_on_queue */

#endif	/* DEBUG */

/*
 * like swtch(), but switch to a specified thread taken from another CPU.
 *	called with spl high.
 */
void
swtch_to(kthread_t *next)
{
	cpu_t			*cp = CPU;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	/*
	 * Update context switch statistics.
	 */
	CPU_STATS_ADDQ(cp, sys, pswitch, 1);

	TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

	if (curthread == cp->cpu_idle_thread)
		PG_NRUN_UPDATE(cp, 1);

	/* OK to steal anything left on run queue */
	cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL;

	/* record last execution time */
	cp->cpu_last_swtch = curthread->t_disp_time = lbolt;

	/*
	 * If t was previously in the TS_ONPROC state, setfrontdq and setbackdq
	 * won't have set its t_waitrq.  Since we now finally know that we're
	 * switching away from this thread, set its t_waitrq if it is on a run
	 * queue.
	 */
	if ((curthread->t_state == TS_RUN) && (curthread->t_waitrq == 0)) {
		curthread->t_waitrq = gethrtime_unscaled();
	}

	/* restore next thread to previously running microstate */
	restore_mstate(next);

	if (dtrace_vtime_active)
		dtrace_vtime_switch(next);

	resume(next);
	/*
	 * The TR_RESUME_END and TR_SWTCH_END trace points
	 * appear at the end of resume(), because we may not
	 * return here
	 */
}



#define	CPU_IDLING(pri)	((pri) == -1)

static void
cpu_resched(cpu_t *cp, pri_t tpri)
{
	int	call_poke_cpu = 0;
	pri_t	cpupri = cp->cpu_dispatch_pri;

	if (!CPU_IDLING(cpupri) && (cpupri < tpri)) {
		TRACE_2(TR_FAC_DISP, TR_CPU_RESCHED,
		    "CPU_RESCHED:Tpri %d Cpupri %d", tpri, cpupri);
		if (tpri >= upreemptpri && cp->cpu_runrun == 0) {
			cp->cpu_runrun = 1;
			aston(cp->cpu_dispthread);
			if (tpri < kpreemptpri && cp != CPU)
				call_poke_cpu = 1;
		}
		if (tpri >= kpreemptpri && cp->cpu_kprunrun == 0) {
			cp->cpu_kprunrun = 1;
			if (cp != CPU)
				call_poke_cpu = 1;
		}
	}

	/*
	 * Propagate cpu_runrun and cpu_kprunrun to global visibility.
	 */
	membar_enter();

	if (call_poke_cpu)
		poke_cpu(cp->cpu_id);
}

/*
 * Perform multi-level CMT load balancing of running threads.
 * tp is the thread being enqueued.
 * cp is the hint CPU (chosen by cpu_choose()).
11277c478bd9Sstevel@tonic-gate */ 11287c478bd9Sstevel@tonic-gate static cpu_t * 1129fb2f18f8Sesaxe cmt_balance(kthread_t *tp, cpu_t *cp) 11307c478bd9Sstevel@tonic-gate { 1131d129bde2Sesaxe int hint, i, cpu, nsiblings; 1132fb2f18f8Sesaxe int self = 0; 1133fb2f18f8Sesaxe group_t *cmt_pgs, *siblings; 1134fb2f18f8Sesaxe pg_cmt_t *pg, *pg_tmp, *tpg = NULL; 1135fb2f18f8Sesaxe int pg_nrun, tpg_nrun; 1136fb2f18f8Sesaxe int level = 0; 1137fb2f18f8Sesaxe cpu_t *newcp; 11387c478bd9Sstevel@tonic-gate 1139fb2f18f8Sesaxe ASSERT(THREAD_LOCK_HELD(tp)); 11407c478bd9Sstevel@tonic-gate 1141fb2f18f8Sesaxe cmt_pgs = &cp->cpu_pg->cmt_pgs; 1142fb2f18f8Sesaxe 1143fb2f18f8Sesaxe if (GROUP_SIZE(cmt_pgs) == 0) 1144fb2f18f8Sesaxe return (cp); /* nothing to do */ 1145fb2f18f8Sesaxe 1146fb2f18f8Sesaxe if (tp == curthread) 1147fb2f18f8Sesaxe self = 1; 11487c478bd9Sstevel@tonic-gate 11497c478bd9Sstevel@tonic-gate /* 1150fb2f18f8Sesaxe * Balance across siblings in the CPUs CMT lineage 11517c478bd9Sstevel@tonic-gate */ 11527c478bd9Sstevel@tonic-gate do { 1153fb2f18f8Sesaxe pg = GROUP_ACCESS(cmt_pgs, level); 11547c478bd9Sstevel@tonic-gate 1155d129bde2Sesaxe siblings = pg->cmt_siblings; 1156d129bde2Sesaxe nsiblings = GROUP_SIZE(siblings); /* self inclusive */ 1157d129bde2Sesaxe if (nsiblings == 1) 1158d129bde2Sesaxe continue; /* nobody to balance against */ 1159d129bde2Sesaxe 1160fb2f18f8Sesaxe pg_nrun = pg->cmt_nrunning; 1161fb2f18f8Sesaxe if (self && 1162fb2f18f8Sesaxe bitset_in_set(&pg->cmt_cpus_actv_set, CPU->cpu_seqid)) 1163fb2f18f8Sesaxe pg_nrun--; /* Ignore curthread's effect */ 1164fb2f18f8Sesaxe 1165fb2f18f8Sesaxe hint = pg->cmt_hint; 11667c478bd9Sstevel@tonic-gate /* 1167fb2f18f8Sesaxe * Check for validity of the hint 1168fb2f18f8Sesaxe * It should reference a valid sibling 11697c478bd9Sstevel@tonic-gate */ 1170d129bde2Sesaxe if (hint >= nsiblings) 1171fb2f18f8Sesaxe hint = pg->cmt_hint = 0; 1172fb2f18f8Sesaxe else 1173fb2f18f8Sesaxe pg->cmt_hint++; 11747c478bd9Sstevel@tonic-gate 11757c478bd9Sstevel@tonic-gate /* 1176fb2f18f8Sesaxe * Find a balancing candidate from among our siblings 1177fb2f18f8Sesaxe * "hint" is a hint for where to start looking 11787c478bd9Sstevel@tonic-gate */ 1179fb2f18f8Sesaxe i = hint; 1180fb2f18f8Sesaxe do { 1181d129bde2Sesaxe ASSERT(i < nsiblings); 1182fb2f18f8Sesaxe pg_tmp = GROUP_ACCESS(siblings, i); 1183fb2f18f8Sesaxe 1184fb2f18f8Sesaxe /* 1185fb2f18f8Sesaxe * The candidate must not be us, and must 1186fb2f18f8Sesaxe * have some CPU resources in the thread's 1187fb2f18f8Sesaxe * partition 1188fb2f18f8Sesaxe */ 1189fb2f18f8Sesaxe if (pg_tmp != pg && 1190fb2f18f8Sesaxe bitset_in_set(&tp->t_cpupart->cp_cmt_pgs, 1191fb2f18f8Sesaxe ((pg_t *)pg_tmp)->pg_id)) { 1192fb2f18f8Sesaxe tpg = pg_tmp; 11937c478bd9Sstevel@tonic-gate break; 1194fb2f18f8Sesaxe } 11957c478bd9Sstevel@tonic-gate 1196d129bde2Sesaxe if (++i >= nsiblings) 1197fb2f18f8Sesaxe i = 0; 1198fb2f18f8Sesaxe } while (i != hint); 1199fb2f18f8Sesaxe 1200fb2f18f8Sesaxe if (!tpg) 1201fb2f18f8Sesaxe continue; /* no candidates at this level */ 1202fb2f18f8Sesaxe 1203fb2f18f8Sesaxe /* 1204fb2f18f8Sesaxe * Check if the balancing target is underloaded 1205fb2f18f8Sesaxe * Decide to balance if the target is running fewer 1206fb2f18f8Sesaxe * threads, or if it's running the same number of threads 1207fb2f18f8Sesaxe * with more online CPUs 1208fb2f18f8Sesaxe */ 1209fb2f18f8Sesaxe tpg_nrun = tpg->cmt_nrunning; 1210fb2f18f8Sesaxe if (pg_nrun > tpg_nrun || 1211fb2f18f8Sesaxe (pg_nrun == tpg_nrun && 1212fb2f18f8Sesaxe (GROUP_SIZE(&tpg->cmt_cpus_actv) 
> 1213fb2f18f8Sesaxe GROUP_SIZE(&pg->cmt_cpus_actv)))) { 1214fb2f18f8Sesaxe break; 1215fb2f18f8Sesaxe } 1216fb2f18f8Sesaxe tpg = NULL; 1217fb2f18f8Sesaxe } while (++level < GROUP_SIZE(cmt_pgs)); 1218fb2f18f8Sesaxe 1219fb2f18f8Sesaxe 1220fb2f18f8Sesaxe if (tpg) { 1221fb2f18f8Sesaxe /* 1222fb2f18f8Sesaxe * Select an idle CPU from the target PG 1223fb2f18f8Sesaxe */ 1224fb2f18f8Sesaxe for (cpu = 0; cpu < GROUP_SIZE(&tpg->cmt_cpus_actv); cpu++) { 1225fb2f18f8Sesaxe newcp = GROUP_ACCESS(&tpg->cmt_cpus_actv, cpu); 1226fb2f18f8Sesaxe if (newcp->cpu_part == tp->t_cpupart && 1227fb2f18f8Sesaxe newcp->cpu_dispatch_pri == -1) { 1228fb2f18f8Sesaxe cp = newcp; 1229fb2f18f8Sesaxe break; 1230fb2f18f8Sesaxe } 1231fb2f18f8Sesaxe } 1232fb2f18f8Sesaxe } 1233fb2f18f8Sesaxe 12347c478bd9Sstevel@tonic-gate return (cp); 12357c478bd9Sstevel@tonic-gate } 12367c478bd9Sstevel@tonic-gate 12377c478bd9Sstevel@tonic-gate /* 12387c478bd9Sstevel@tonic-gate * setbackdq() keeps runqs balanced such that the difference in length 12397c478bd9Sstevel@tonic-gate * between the chosen runq and the next one is no more than RUNQ_MAX_DIFF. 12407c478bd9Sstevel@tonic-gate * For threads with priorities below RUNQ_MATCH_PRI levels, the runq's lengths 12417c478bd9Sstevel@tonic-gate * must match. When per-thread TS_RUNQMATCH flag is set, setbackdq() will 12427c478bd9Sstevel@tonic-gate * try to keep runqs perfectly balanced regardless of the thread priority. 12437c478bd9Sstevel@tonic-gate */ 12447c478bd9Sstevel@tonic-gate #define RUNQ_MATCH_PRI 16 /* pri below which queue lengths must match */ 12457c478bd9Sstevel@tonic-gate #define RUNQ_MAX_DIFF 2 /* maximum runq length difference */ 12467c478bd9Sstevel@tonic-gate #define RUNQ_LEN(cp, pri) ((cp)->cpu_disp->disp_q[pri].dq_sruncnt) 12477c478bd9Sstevel@tonic-gate 12487c478bd9Sstevel@tonic-gate /* 12497c478bd9Sstevel@tonic-gate * Put the specified thread on the back of the dispatcher 12507c478bd9Sstevel@tonic-gate * queue corresponding to its current priority. 12517c478bd9Sstevel@tonic-gate * 12527c478bd9Sstevel@tonic-gate * Called with the thread in transition, onproc or stopped state 12537c478bd9Sstevel@tonic-gate * and locked (transition implies locked) and at high spl. 12547c478bd9Sstevel@tonic-gate * Returns with the thread in TS_RUN state and still locked. 12557c478bd9Sstevel@tonic-gate */ 12567c478bd9Sstevel@tonic-gate void 12577c478bd9Sstevel@tonic-gate setbackdq(kthread_t *tp) 12587c478bd9Sstevel@tonic-gate { 12597c478bd9Sstevel@tonic-gate dispq_t *dq; 12607c478bd9Sstevel@tonic-gate disp_t *dp; 12617c478bd9Sstevel@tonic-gate cpu_t *cp; 12627c478bd9Sstevel@tonic-gate pri_t tpri; 12637c478bd9Sstevel@tonic-gate int bound; 12647c478bd9Sstevel@tonic-gate 12657c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 12667c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & TS_ALLSTART) == 0); 12677c478bd9Sstevel@tonic-gate ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */ 12687c478bd9Sstevel@tonic-gate 12697c478bd9Sstevel@tonic-gate /* 12707c478bd9Sstevel@tonic-gate * If thread is "swapped" or on the swap queue don't 12717c478bd9Sstevel@tonic-gate * queue it, but wake sched. 
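 * (Here "sched" is the swapper; disp_swapped_setrun() requests the swap-in
 * by setting wake_sched or wake_sched_sec.)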
12727c478bd9Sstevel@tonic-gate */ 12737c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) { 12747c478bd9Sstevel@tonic-gate disp_swapped_setrun(tp); 12757c478bd9Sstevel@tonic-gate return; 12767c478bd9Sstevel@tonic-gate } 12777c478bd9Sstevel@tonic-gate 1278abd41583Sgd209917 if (tp->t_bound_cpu || tp->t_weakbound_cpu) 1279abd41583Sgd209917 bound = 1; 1280abd41583Sgd209917 else 1281abd41583Sgd209917 bound = 0; 1282abd41583Sgd209917 12837c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 12847c478bd9Sstevel@tonic-gate if (ncpus == 1) 12857c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 1286abd41583Sgd209917 else if (!bound) { 12877c478bd9Sstevel@tonic-gate if (tpri >= kpqpri) { 12887c478bd9Sstevel@tonic-gate setkpdq(tp, SETKP_BACK); 12897c478bd9Sstevel@tonic-gate return; 12907c478bd9Sstevel@tonic-gate } 12917c478bd9Sstevel@tonic-gate /* 12927c478bd9Sstevel@tonic-gate * Let cpu_choose suggest a CPU. 12937c478bd9Sstevel@tonic-gate */ 12947c478bd9Sstevel@tonic-gate cp = cpu_choose(tp, tpri); 12957c478bd9Sstevel@tonic-gate 12967c478bd9Sstevel@tonic-gate if (tp->t_cpupart == cp->cpu_part) { 12977c478bd9Sstevel@tonic-gate int qlen; 12987c478bd9Sstevel@tonic-gate 12997c478bd9Sstevel@tonic-gate /* 1300fb2f18f8Sesaxe * Perform any CMT load balancing 13017c478bd9Sstevel@tonic-gate */ 1302fb2f18f8Sesaxe cp = cmt_balance(tp, cp); 13037c478bd9Sstevel@tonic-gate 13047c478bd9Sstevel@tonic-gate /* 13057c478bd9Sstevel@tonic-gate * Balance across the run queues 13067c478bd9Sstevel@tonic-gate */ 13077c478bd9Sstevel@tonic-gate qlen = RUNQ_LEN(cp, tpri); 13087c478bd9Sstevel@tonic-gate if (tpri >= RUNQ_MATCH_PRI && 13097c478bd9Sstevel@tonic-gate !(tp->t_schedflag & TS_RUNQMATCH)) 13107c478bd9Sstevel@tonic-gate qlen -= RUNQ_MAX_DIFF; 13117c478bd9Sstevel@tonic-gate if (qlen > 0) { 1312685679f7Sakolb cpu_t *newcp; 13137c478bd9Sstevel@tonic-gate 1314685679f7Sakolb if (tp->t_lpl->lpl_lgrpid == LGRP_ROOTID) { 1315685679f7Sakolb newcp = cp->cpu_next_part; 1316685679f7Sakolb } else if ((newcp = cp->cpu_next_lpl) == cp) { 1317685679f7Sakolb newcp = cp->cpu_next_part; 13187c478bd9Sstevel@tonic-gate } 1319685679f7Sakolb 1320685679f7Sakolb if (RUNQ_LEN(newcp, tpri) < qlen) { 1321685679f7Sakolb DTRACE_PROBE3(runq__balance, 1322685679f7Sakolb kthread_t *, tp, 1323685679f7Sakolb cpu_t *, cp, cpu_t *, newcp); 1324685679f7Sakolb cp = newcp; 1325685679f7Sakolb } 13267c478bd9Sstevel@tonic-gate } 13277c478bd9Sstevel@tonic-gate } else { 13287c478bd9Sstevel@tonic-gate /* 13297c478bd9Sstevel@tonic-gate * Migrate to a cpu in the new partition. 13307c478bd9Sstevel@tonic-gate */ 13317c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist, 13327c478bd9Sstevel@tonic-gate tp->t_lpl, tp->t_pri, NULL); 13337c478bd9Sstevel@tonic-gate } 13347c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 13357c478bd9Sstevel@tonic-gate } else { 13367c478bd9Sstevel@tonic-gate /* 13377c478bd9Sstevel@tonic-gate * It is possible that t_weakbound_cpu != t_bound_cpu (for 13387c478bd9Sstevel@tonic-gate * a short time until weak binding that existed when the 13397c478bd9Sstevel@tonic-gate * strong binding was established has dropped) so we must 13407c478bd9Sstevel@tonic-gate * favour weak binding over strong. 13417c478bd9Sstevel@tonic-gate */ 13427c478bd9Sstevel@tonic-gate cp = tp->t_weakbound_cpu ? 
13437c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu : tp->t_bound_cpu; 13447c478bd9Sstevel@tonic-gate } 1345f2bd4627Sjohansen /* 1346f2bd4627Sjohansen * A thread that is ONPROC may be temporarily placed on the run queue 1347f2bd4627Sjohansen * but then chosen to run again by disp. If the thread we're placing on 1348f2bd4627Sjohansen * the queue is in TS_ONPROC state, don't set its t_waitrq until a 1349f2bd4627Sjohansen * replacement process is actually scheduled in swtch(). In this 1350f2bd4627Sjohansen * situation, curthread is the only thread that could be in the ONPROC 1351f2bd4627Sjohansen * state. 1352f2bd4627Sjohansen */ 1353f2bd4627Sjohansen if ((tp != curthread) && (tp->t_waitrq == 0)) { 1354f2bd4627Sjohansen hrtime_t curtime; 1355f2bd4627Sjohansen 1356f2bd4627Sjohansen curtime = gethrtime_unscaled(); 1357f2bd4627Sjohansen (void) cpu_update_pct(tp, curtime); 1358f2bd4627Sjohansen tp->t_waitrq = curtime; 1359f2bd4627Sjohansen } else { 1360f2bd4627Sjohansen (void) cpu_update_pct(tp, gethrtime_unscaled()); 1361f2bd4627Sjohansen } 1362f2bd4627Sjohansen 13637c478bd9Sstevel@tonic-gate dp = cp->cpu_disp; 13647c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock); 13657c478bd9Sstevel@tonic-gate 13667c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 0); 13677c478bd9Sstevel@tonic-gate TRACE_3(TR_FAC_DISP, TR_BACKQ, "setbackdq:pri %d cpu %p tid %p", 13687c478bd9Sstevel@tonic-gate tpri, cp, tp); 13697c478bd9Sstevel@tonic-gate 13707c478bd9Sstevel@tonic-gate #ifndef NPROBE 13717c478bd9Sstevel@tonic-gate /* Kernel probe */ 13727c478bd9Sstevel@tonic-gate if (tnf_tracing_active) 13737c478bd9Sstevel@tonic-gate tnf_thread_queue(tp, cp, tpri); 13747c478bd9Sstevel@tonic-gate #endif /* NPROBE */ 13757c478bd9Sstevel@tonic-gate 13767c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 13777c478bd9Sstevel@tonic-gate 13787c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */ 13797c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp; 13807c478bd9Sstevel@tonic-gate tp->t_link = NULL; 13817c478bd9Sstevel@tonic-gate 13827c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 13837c478bd9Sstevel@tonic-gate dp->disp_nrunnable++; 1384685679f7Sakolb if (!bound) 1385685679f7Sakolb dp->disp_steal = 0; 13867c478bd9Sstevel@tonic-gate membar_enter(); 13877c478bd9Sstevel@tonic-gate 13887c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) { 13897c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first != NULL); 13907c478bd9Sstevel@tonic-gate dq->dq_last->t_link = tp; 13917c478bd9Sstevel@tonic-gate dq->dq_last = tp; 13927c478bd9Sstevel@tonic-gate } else { 13937c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 13947c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 13957c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 13967c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri); 13977c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) { 13987c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri; 13997c478bd9Sstevel@tonic-gate membar_enter(); 14007c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri); 14017c478bd9Sstevel@tonic-gate } 14027c478bd9Sstevel@tonic-gate } 14037c478bd9Sstevel@tonic-gate 14047c478bd9Sstevel@tonic-gate if (!bound && tpri > dp->disp_max_unbound_pri) { 14057c478bd9Sstevel@tonic-gate if (tp == curthread && dp->disp_max_unbound_pri == -1 && 14067c478bd9Sstevel@tonic-gate cp == CPU) { 14077c478bd9Sstevel@tonic-gate /* 14087c478bd9Sstevel@tonic-gate * If there are no other unbound threads on the 
14097c478bd9Sstevel@tonic-gate * run queue, don't allow other CPUs to steal 14107c478bd9Sstevel@tonic-gate * this thread while we are in the middle of a 14117c478bd9Sstevel@tonic-gate * context switch. We may just switch to it 14127c478bd9Sstevel@tonic-gate * again right away. CPU_DISP_DONTSTEAL is cleared 14137c478bd9Sstevel@tonic-gate * in swtch and swtch_to. 14147c478bd9Sstevel@tonic-gate */ 14157c478bd9Sstevel@tonic-gate cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL; 14167c478bd9Sstevel@tonic-gate } 14177c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 14187c478bd9Sstevel@tonic-gate } 14197c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, bound); 14207c478bd9Sstevel@tonic-gate } 14217c478bd9Sstevel@tonic-gate 14227c478bd9Sstevel@tonic-gate /* 14237c478bd9Sstevel@tonic-gate * Put the specified thread on the front of the dispatcher 14247c478bd9Sstevel@tonic-gate * queue corresponding to its current priority. 14257c478bd9Sstevel@tonic-gate * 14267c478bd9Sstevel@tonic-gate * Called with the thread in transition, onproc or stopped state 14277c478bd9Sstevel@tonic-gate * and locked (transition implies locked) and at high spl. 14287c478bd9Sstevel@tonic-gate * Returns with the thread in TS_RUN state and still locked. 14297c478bd9Sstevel@tonic-gate */ 14307c478bd9Sstevel@tonic-gate void 14317c478bd9Sstevel@tonic-gate setfrontdq(kthread_t *tp) 14327c478bd9Sstevel@tonic-gate { 14337c478bd9Sstevel@tonic-gate disp_t *dp; 14347c478bd9Sstevel@tonic-gate dispq_t *dq; 14357c478bd9Sstevel@tonic-gate cpu_t *cp; 14367c478bd9Sstevel@tonic-gate pri_t tpri; 14377c478bd9Sstevel@tonic-gate int bound; 14387c478bd9Sstevel@tonic-gate 14397c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 14407c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & TS_ALLSTART) == 0); 14417c478bd9Sstevel@tonic-gate ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */ 14427c478bd9Sstevel@tonic-gate 14437c478bd9Sstevel@tonic-gate /* 14447c478bd9Sstevel@tonic-gate * If thread is "swapped" or on the swap queue don't 14457c478bd9Sstevel@tonic-gate * queue it, but wake sched. 14467c478bd9Sstevel@tonic-gate */ 14477c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) { 14487c478bd9Sstevel@tonic-gate disp_swapped_setrun(tp); 14497c478bd9Sstevel@tonic-gate return; 14507c478bd9Sstevel@tonic-gate } 14517c478bd9Sstevel@tonic-gate 1452abd41583Sgd209917 if (tp->t_bound_cpu || tp->t_weakbound_cpu) 1453abd41583Sgd209917 bound = 1; 1454abd41583Sgd209917 else 1455abd41583Sgd209917 bound = 0; 1456abd41583Sgd209917 14577c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 14587c478bd9Sstevel@tonic-gate if (ncpus == 1) 14597c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 1460abd41583Sgd209917 else if (!bound) { 14617c478bd9Sstevel@tonic-gate if (tpri >= kpqpri) { 14627c478bd9Sstevel@tonic-gate setkpdq(tp, SETKP_FRONT); 14637c478bd9Sstevel@tonic-gate return; 14647c478bd9Sstevel@tonic-gate } 14657c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 14667c478bd9Sstevel@tonic-gate if (tp->t_cpupart == cp->cpu_part) { 14677c478bd9Sstevel@tonic-gate /* 14687c478bd9Sstevel@tonic-gate * If we are of higher or equal priority than 14697c478bd9Sstevel@tonic-gate * the highest priority runnable thread of 14707c478bd9Sstevel@tonic-gate * the current CPU, just pick this CPU. Otherwise 14717c478bd9Sstevel@tonic-gate * Let cpu_choose() select the CPU. 
If this cpu 14727c478bd9Sstevel@tonic-gate * is the target of an offline request then do not 14737c478bd9Sstevel@tonic-gate * pick it - a thread_nomigrate() on the in motion 14747c478bd9Sstevel@tonic-gate * cpu relies on this when it forces a preempt. 14757c478bd9Sstevel@tonic-gate */ 14767c478bd9Sstevel@tonic-gate if (tpri < cp->cpu_disp->disp_maxrunpri || 14777c478bd9Sstevel@tonic-gate cp == cpu_inmotion) 14787c478bd9Sstevel@tonic-gate cp = cpu_choose(tp, tpri); 14797c478bd9Sstevel@tonic-gate } else { 14807c478bd9Sstevel@tonic-gate /* 14817c478bd9Sstevel@tonic-gate * Migrate to a cpu in the new partition. 14827c478bd9Sstevel@tonic-gate */ 14837c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist, 14847c478bd9Sstevel@tonic-gate tp->t_lpl, tp->t_pri, NULL); 14857c478bd9Sstevel@tonic-gate } 14867c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 14877c478bd9Sstevel@tonic-gate } else { 14887c478bd9Sstevel@tonic-gate /* 14897c478bd9Sstevel@tonic-gate * It is possible that t_weakbound_cpu != t_bound_cpu (for 14907c478bd9Sstevel@tonic-gate * a short time until weak binding that existed when the 14917c478bd9Sstevel@tonic-gate * strong binding was established has dropped) so we must 14927c478bd9Sstevel@tonic-gate * favour weak binding over strong. 14937c478bd9Sstevel@tonic-gate */ 14947c478bd9Sstevel@tonic-gate cp = tp->t_weakbound_cpu ? 14957c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu : tp->t_bound_cpu; 14967c478bd9Sstevel@tonic-gate } 1497f2bd4627Sjohansen 1498f2bd4627Sjohansen /* 1499f2bd4627Sjohansen * A thread that is ONPROC may be temporarily placed on the run queue 1500f2bd4627Sjohansen * but then chosen to run again by disp. If the thread we're placing on 1501f2bd4627Sjohansen * the queue is in TS_ONPROC state, don't set its t_waitrq until a 1502f2bd4627Sjohansen * replacement process is actually scheduled in swtch(). In this 1503f2bd4627Sjohansen * situation, curthread is the only thread that could be in the ONPROC 1504f2bd4627Sjohansen * state. 
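 * The t_waitrq timestamp recorded here is what disp_getbest() later uses
 * to measure how long the thread has been waiting on a run queue.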
1505f2bd4627Sjohansen */ 1506f2bd4627Sjohansen if ((tp != curthread) && (tp->t_waitrq == 0)) { 1507f2bd4627Sjohansen hrtime_t curtime; 1508f2bd4627Sjohansen 1509f2bd4627Sjohansen curtime = gethrtime_unscaled(); 1510f2bd4627Sjohansen (void) cpu_update_pct(tp, curtime); 1511f2bd4627Sjohansen tp->t_waitrq = curtime; 1512f2bd4627Sjohansen } else { 1513f2bd4627Sjohansen (void) cpu_update_pct(tp, gethrtime_unscaled()); 1514f2bd4627Sjohansen } 1515f2bd4627Sjohansen 15167c478bd9Sstevel@tonic-gate dp = cp->cpu_disp; 15177c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock); 15187c478bd9Sstevel@tonic-gate 15197c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp); 15207c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 1); 15217c478bd9Sstevel@tonic-gate 15227c478bd9Sstevel@tonic-gate #ifndef NPROBE 15237c478bd9Sstevel@tonic-gate /* Kernel probe */ 15247c478bd9Sstevel@tonic-gate if (tnf_tracing_active) 15257c478bd9Sstevel@tonic-gate tnf_thread_queue(tp, cp, tpri); 15267c478bd9Sstevel@tonic-gate #endif /* NPROBE */ 15277c478bd9Sstevel@tonic-gate 15287c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 15297c478bd9Sstevel@tonic-gate 15307c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set TS_RUN state and lock */ 15317c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp; 15327c478bd9Sstevel@tonic-gate 15337c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 15347c478bd9Sstevel@tonic-gate dp->disp_nrunnable++; 1535685679f7Sakolb if (!bound) 1536685679f7Sakolb dp->disp_steal = 0; 15377c478bd9Sstevel@tonic-gate membar_enter(); 15387c478bd9Sstevel@tonic-gate 15397c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) { 15407c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last != NULL); 15417c478bd9Sstevel@tonic-gate tp->t_link = dq->dq_first; 15427c478bd9Sstevel@tonic-gate dq->dq_first = tp; 15437c478bd9Sstevel@tonic-gate } else { 15447c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 15457c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 15467c478bd9Sstevel@tonic-gate tp->t_link = NULL; 15477c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 15487c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri); 15497c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) { 15507c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri; 15517c478bd9Sstevel@tonic-gate membar_enter(); 15527c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri); 15537c478bd9Sstevel@tonic-gate } 15547c478bd9Sstevel@tonic-gate } 15557c478bd9Sstevel@tonic-gate 15567c478bd9Sstevel@tonic-gate if (!bound && tpri > dp->disp_max_unbound_pri) { 15577c478bd9Sstevel@tonic-gate if (tp == curthread && dp->disp_max_unbound_pri == -1 && 15587c478bd9Sstevel@tonic-gate cp == CPU) { 15597c478bd9Sstevel@tonic-gate /* 15607c478bd9Sstevel@tonic-gate * If there are no other unbound threads on the 15617c478bd9Sstevel@tonic-gate * run queue, don't allow other CPUs to steal 15627c478bd9Sstevel@tonic-gate * this thread while we are in the middle of a 15637c478bd9Sstevel@tonic-gate * context switch. We may just switch to it 15647c478bd9Sstevel@tonic-gate * again right away. CPU_DISP_DONTSTEAL is cleared 15657c478bd9Sstevel@tonic-gate * in swtch and swtch_to. 
15667c478bd9Sstevel@tonic-gate */ 15677c478bd9Sstevel@tonic-gate cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL; 15687c478bd9Sstevel@tonic-gate } 15697c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 15707c478bd9Sstevel@tonic-gate } 15717c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, bound); 15727c478bd9Sstevel@tonic-gate } 15737c478bd9Sstevel@tonic-gate 15747c478bd9Sstevel@tonic-gate /* 15757c478bd9Sstevel@tonic-gate * Put a high-priority unbound thread on the kp queue 15767c478bd9Sstevel@tonic-gate */ 15777c478bd9Sstevel@tonic-gate static void 15787c478bd9Sstevel@tonic-gate setkpdq(kthread_t *tp, int borf) 15797c478bd9Sstevel@tonic-gate { 15807c478bd9Sstevel@tonic-gate dispq_t *dq; 15817c478bd9Sstevel@tonic-gate disp_t *dp; 15827c478bd9Sstevel@tonic-gate cpu_t *cp; 15837c478bd9Sstevel@tonic-gate pri_t tpri; 15847c478bd9Sstevel@tonic-gate 15857c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 15867c478bd9Sstevel@tonic-gate 15877c478bd9Sstevel@tonic-gate dp = &tp->t_cpupart->cp_kp_queue; 15887c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock); 15897c478bd9Sstevel@tonic-gate 15907c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp); 15917c478bd9Sstevel@tonic-gate 15927c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 15937c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, borf); 15947c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */ 15957c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp; 15967c478bd9Sstevel@tonic-gate dp->disp_nrunnable++; 15977c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 15987c478bd9Sstevel@tonic-gate 15997c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) { 16007c478bd9Sstevel@tonic-gate if (borf == SETKP_BACK) { 16017c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first != NULL); 16027c478bd9Sstevel@tonic-gate tp->t_link = NULL; 16037c478bd9Sstevel@tonic-gate dq->dq_last->t_link = tp; 16047c478bd9Sstevel@tonic-gate dq->dq_last = tp; 16057c478bd9Sstevel@tonic-gate } else { 16067c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last != NULL); 16077c478bd9Sstevel@tonic-gate tp->t_link = dq->dq_first; 16087c478bd9Sstevel@tonic-gate dq->dq_first = tp; 16097c478bd9Sstevel@tonic-gate } 16107c478bd9Sstevel@tonic-gate } else { 16117c478bd9Sstevel@tonic-gate if (borf == SETKP_BACK) { 16127c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 16137c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 16147c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 16157c478bd9Sstevel@tonic-gate } else { 16167c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 16177c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 16187c478bd9Sstevel@tonic-gate tp->t_link = NULL; 16197c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 16207c478bd9Sstevel@tonic-gate } 16217c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri); 16227c478bd9Sstevel@tonic-gate if (tpri > dp->disp_max_unbound_pri) 16237c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 16247c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) { 16257c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri; 16267c478bd9Sstevel@tonic-gate membar_enter(); 16277c478bd9Sstevel@tonic-gate } 16287c478bd9Sstevel@tonic-gate } 16297c478bd9Sstevel@tonic-gate 16307c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 16317c478bd9Sstevel@tonic-gate if (tp->t_cpupart != cp->cpu_part) { 16327c478bd9Sstevel@tonic-gate /* migrate to a cpu in the new partition */ 16337c478bd9Sstevel@tonic-gate cp = 
tp->t_cpupart->cp_cpulist; 16347c478bd9Sstevel@tonic-gate } 16357c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(cp, tp->t_lpl, tp->t_pri, NULL); 16367c478bd9Sstevel@tonic-gate disp_lock_enter_high(&cp->cpu_disp->disp_lock); 16377c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 16387c478bd9Sstevel@tonic-gate 16397c478bd9Sstevel@tonic-gate #ifndef NPROBE 16407c478bd9Sstevel@tonic-gate /* Kernel probe */ 16417c478bd9Sstevel@tonic-gate if (tnf_tracing_active) 16427c478bd9Sstevel@tonic-gate tnf_thread_queue(tp, cp, tpri); 16437c478bd9Sstevel@tonic-gate #endif /* NPROBE */ 16447c478bd9Sstevel@tonic-gate 16457c478bd9Sstevel@tonic-gate if (cp->cpu_chosen_level < tpri) 16467c478bd9Sstevel@tonic-gate cp->cpu_chosen_level = tpri; 16477c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri); 16487c478bd9Sstevel@tonic-gate disp_lock_exit_high(&cp->cpu_disp->disp_lock); 16497c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, 0); 16507c478bd9Sstevel@tonic-gate } 16517c478bd9Sstevel@tonic-gate 16527c478bd9Sstevel@tonic-gate /* 16537c478bd9Sstevel@tonic-gate * Remove a thread from the dispatcher queue if it is on it. 16547c478bd9Sstevel@tonic-gate * It is not an error if it is not found but we return whether 16557c478bd9Sstevel@tonic-gate * or not it was found in case the caller wants to check. 16567c478bd9Sstevel@tonic-gate */ 16577c478bd9Sstevel@tonic-gate int 16587c478bd9Sstevel@tonic-gate dispdeq(kthread_t *tp) 16597c478bd9Sstevel@tonic-gate { 16607c478bd9Sstevel@tonic-gate disp_t *dp; 16617c478bd9Sstevel@tonic-gate dispq_t *dq; 16627c478bd9Sstevel@tonic-gate kthread_t *rp; 16637c478bd9Sstevel@tonic-gate kthread_t *trp; 16647c478bd9Sstevel@tonic-gate kthread_t **ptp; 16657c478bd9Sstevel@tonic-gate int tpri; 16667c478bd9Sstevel@tonic-gate 16677c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 16687c478bd9Sstevel@tonic-gate 16697c478bd9Sstevel@tonic-gate if (tp->t_state != TS_RUN) 16707c478bd9Sstevel@tonic-gate return (0); 16717c478bd9Sstevel@tonic-gate 16727c478bd9Sstevel@tonic-gate /* 16737c478bd9Sstevel@tonic-gate * The thread is "swapped" or is on the swap queue and 16747c478bd9Sstevel@tonic-gate * hence no longer on the run queue, so return true. 16757c478bd9Sstevel@tonic-gate */ 16767c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) 16777c478bd9Sstevel@tonic-gate return (1); 16787c478bd9Sstevel@tonic-gate 16797c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 16807c478bd9Sstevel@tonic-gate dp = tp->t_disp_queue; 16817c478bd9Sstevel@tonic-gate ASSERT(tpri < dp->disp_npri); 16827c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 16837c478bd9Sstevel@tonic-gate ptp = &dq->dq_first; 16847c478bd9Sstevel@tonic-gate rp = *ptp; 16857c478bd9Sstevel@tonic-gate trp = NULL; 16867c478bd9Sstevel@tonic-gate 16877c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL); 16887c478bd9Sstevel@tonic-gate 16897c478bd9Sstevel@tonic-gate /* 16907c478bd9Sstevel@tonic-gate * Search for thread in queue. 16917c478bd9Sstevel@tonic-gate * Double links would simplify this at the expense of disp/setrun. 
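 * The walk below keeps trp pointing at the node preceding rp, so that once
 * tp is found the unlink can patch *ptp and, if tp was the tail, dq_last.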
16927c478bd9Sstevel@tonic-gate */ 16937c478bd9Sstevel@tonic-gate while (rp != tp && rp != NULL) { 16947c478bd9Sstevel@tonic-gate trp = rp; 16957c478bd9Sstevel@tonic-gate ptp = &trp->t_link; 16967c478bd9Sstevel@tonic-gate rp = trp->t_link; 16977c478bd9Sstevel@tonic-gate } 16987c478bd9Sstevel@tonic-gate 16997c478bd9Sstevel@tonic-gate if (rp == NULL) { 17007c478bd9Sstevel@tonic-gate panic("dispdeq: thread not on queue"); 17017c478bd9Sstevel@tonic-gate } 17027c478bd9Sstevel@tonic-gate 17037c478bd9Sstevel@tonic-gate DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp); 17047c478bd9Sstevel@tonic-gate 17057c478bd9Sstevel@tonic-gate /* 17067c478bd9Sstevel@tonic-gate * Found it so remove it from queue. 17077c478bd9Sstevel@tonic-gate */ 17087c478bd9Sstevel@tonic-gate if ((*ptp = rp->t_link) == NULL) 17097c478bd9Sstevel@tonic-gate dq->dq_last = trp; 17107c478bd9Sstevel@tonic-gate 17117c478bd9Sstevel@tonic-gate dp->disp_nrunnable--; 17127c478bd9Sstevel@tonic-gate if (--dq->dq_sruncnt == 0) { 17137c478bd9Sstevel@tonic-gate dp->disp_qactmap[tpri >> BT_ULSHIFT] &= ~BT_BIW(tpri); 17147c478bd9Sstevel@tonic-gate if (dp->disp_nrunnable == 0) { 17157c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = -1; 17167c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = -1; 17177c478bd9Sstevel@tonic-gate } else if (tpri == dp->disp_maxrunpri) { 17187c478bd9Sstevel@tonic-gate int ipri; 17197c478bd9Sstevel@tonic-gate 17207c478bd9Sstevel@tonic-gate ipri = bt_gethighbit(dp->disp_qactmap, 17217c478bd9Sstevel@tonic-gate dp->disp_maxrunpri >> BT_ULSHIFT); 17227c478bd9Sstevel@tonic-gate if (ipri < dp->disp_max_unbound_pri) 17237c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = ipri; 17247c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = ipri; 17257c478bd9Sstevel@tonic-gate } 17267c478bd9Sstevel@tonic-gate } 17277c478bd9Sstevel@tonic-gate tp->t_link = NULL; 17287c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp); /* put in intermediate state */ 17297c478bd9Sstevel@tonic-gate return (1); 17307c478bd9Sstevel@tonic-gate } 17317c478bd9Sstevel@tonic-gate 17327c478bd9Sstevel@tonic-gate 17337c478bd9Sstevel@tonic-gate /* 17347c478bd9Sstevel@tonic-gate * dq_sruninc and dq_srundec are public functions for 17357c478bd9Sstevel@tonic-gate * incrementing/decrementing the sruncnts when a thread on 17367c478bd9Sstevel@tonic-gate * a dispatcher queue is made schedulable/unschedulable by 17377c478bd9Sstevel@tonic-gate * resetting the TS_LOAD flag. 17387c478bd9Sstevel@tonic-gate * 17397c478bd9Sstevel@tonic-gate * The caller MUST have the thread lock and therefore the dispatcher 17407c478bd9Sstevel@tonic-gate * queue lock so that the operation which changes 17417c478bd9Sstevel@tonic-gate * the flag, the operation that checks the status of the thread to 17427c478bd9Sstevel@tonic-gate * determine if it's on a disp queue AND the call to this function 17437c478bd9Sstevel@tonic-gate * are one atomic operation with respect to interrupts. 17447c478bd9Sstevel@tonic-gate */ 17457c478bd9Sstevel@tonic-gate 17467c478bd9Sstevel@tonic-gate /* 17477c478bd9Sstevel@tonic-gate * Called by sched AFTER TS_LOAD flag is set on a swapped, runnable thread. 
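 * dq_sruninc() simply moves the thread through the transition state and
 * requeues it at the front of its dispatch queue via setfrontdq().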
17487c478bd9Sstevel@tonic-gate */
17497c478bd9Sstevel@tonic-gate void
17507c478bd9Sstevel@tonic-gate dq_sruninc(kthread_t *t)
17517c478bd9Sstevel@tonic-gate {
17527c478bd9Sstevel@tonic-gate ASSERT(t->t_state == TS_RUN);
17537c478bd9Sstevel@tonic-gate ASSERT(t->t_schedflag & TS_LOAD);
17547c478bd9Sstevel@tonic-gate
17557c478bd9Sstevel@tonic-gate THREAD_TRANSITION(t);
17567c478bd9Sstevel@tonic-gate setfrontdq(t);
17577c478bd9Sstevel@tonic-gate }
17587c478bd9Sstevel@tonic-gate
17597c478bd9Sstevel@tonic-gate /*
17607c478bd9Sstevel@tonic-gate * See comment on calling conventions above.
17617c478bd9Sstevel@tonic-gate * Called by sched BEFORE TS_LOAD flag is cleared on a runnable thread.
17627c478bd9Sstevel@tonic-gate */
17637c478bd9Sstevel@tonic-gate void
17647c478bd9Sstevel@tonic-gate dq_srundec(kthread_t *t)
17657c478bd9Sstevel@tonic-gate {
17667c478bd9Sstevel@tonic-gate ASSERT(t->t_schedflag & TS_LOAD);
17677c478bd9Sstevel@tonic-gate
17687c478bd9Sstevel@tonic-gate (void) dispdeq(t);
17697c478bd9Sstevel@tonic-gate disp_swapped_enq(t);
17707c478bd9Sstevel@tonic-gate }
17717c478bd9Sstevel@tonic-gate
17727c478bd9Sstevel@tonic-gate /*
17737c478bd9Sstevel@tonic-gate * Change the dispatcher lock of thread to the "swapped_lock"
17747c478bd9Sstevel@tonic-gate * and return with thread lock still held.
17757c478bd9Sstevel@tonic-gate *
17767c478bd9Sstevel@tonic-gate * Called with thread_lock held, in transition state, and at high spl.
17777c478bd9Sstevel@tonic-gate */
17787c478bd9Sstevel@tonic-gate void
17797c478bd9Sstevel@tonic-gate disp_swapped_enq(kthread_t *tp)
17807c478bd9Sstevel@tonic-gate {
17817c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp));
17827c478bd9Sstevel@tonic-gate ASSERT(tp->t_schedflag & TS_LOAD);
17837c478bd9Sstevel@tonic-gate
17847c478bd9Sstevel@tonic-gate switch (tp->t_state) {
17857c478bd9Sstevel@tonic-gate case TS_RUN:
17867c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock);
17877c478bd9Sstevel@tonic-gate THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */
17887c478bd9Sstevel@tonic-gate break;
17897c478bd9Sstevel@tonic-gate case TS_ONPROC:
17907c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock);
17917c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp);
17927c478bd9Sstevel@tonic-gate wake_sched_sec = 1; /* tell clock to wake sched */
17937c478bd9Sstevel@tonic-gate THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */
17947c478bd9Sstevel@tonic-gate break;
17957c478bd9Sstevel@tonic-gate default:
17967c478bd9Sstevel@tonic-gate panic("disp_swapped: tp: %p bad t_state", (void *)tp);
17977c478bd9Sstevel@tonic-gate }
17987c478bd9Sstevel@tonic-gate }
17997c478bd9Sstevel@tonic-gate
18007c478bd9Sstevel@tonic-gate /*
18017c478bd9Sstevel@tonic-gate * This routine is called by setbackdq/setfrontdq if the thread is
18027c478bd9Sstevel@tonic-gate * not loaded or loaded and on the swap queue.
18037c478bd9Sstevel@tonic-gate *
18047c478bd9Sstevel@tonic-gate * Thread state TS_SLEEP implies that a swapped thread
18057c478bd9Sstevel@tonic-gate * has been woken up and needs to be swapped in by the swapper.
18067c478bd9Sstevel@tonic-gate *
18077c478bd9Sstevel@tonic-gate * Thread state TS_RUN implies that the priority of a swapped
18087c478bd9Sstevel@tonic-gate * thread is being increased by its scheduling class (e.g. ts_update).
18097c478bd9Sstevel@tonic-gate */ 18107c478bd9Sstevel@tonic-gate static void 18117c478bd9Sstevel@tonic-gate disp_swapped_setrun(kthread_t *tp) 18127c478bd9Sstevel@tonic-gate { 18137c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 18147c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD); 18157c478bd9Sstevel@tonic-gate 18167c478bd9Sstevel@tonic-gate switch (tp->t_state) { 18177c478bd9Sstevel@tonic-gate case TS_SLEEP: 18187c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock); 18197c478bd9Sstevel@tonic-gate /* 18207c478bd9Sstevel@tonic-gate * Wakeup sched immediately (i.e., next tick) if the 18217c478bd9Sstevel@tonic-gate * thread priority is above maxclsyspri. 18227c478bd9Sstevel@tonic-gate */ 18237c478bd9Sstevel@tonic-gate if (DISP_PRIO(tp) > maxclsyspri) 18247c478bd9Sstevel@tonic-gate wake_sched = 1; 18257c478bd9Sstevel@tonic-gate else 18267c478bd9Sstevel@tonic-gate wake_sched_sec = 1; 18277c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &swapped_lock); /* set TS_RUN state and lock */ 18287c478bd9Sstevel@tonic-gate break; 18297c478bd9Sstevel@tonic-gate case TS_RUN: /* called from ts_update */ 18307c478bd9Sstevel@tonic-gate break; 18317c478bd9Sstevel@tonic-gate default: 1832*8793b36bSNick Todd panic("disp_swapped_setrun: tp: %p bad t_state", (void *)tp); 18337c478bd9Sstevel@tonic-gate } 18347c478bd9Sstevel@tonic-gate } 18357c478bd9Sstevel@tonic-gate 18367c478bd9Sstevel@tonic-gate 18377c478bd9Sstevel@tonic-gate /* 18387c478bd9Sstevel@tonic-gate * Make a thread give up its processor. Find the processor on 18397c478bd9Sstevel@tonic-gate * which this thread is executing, and have that processor 18407c478bd9Sstevel@tonic-gate * preempt. 18417c478bd9Sstevel@tonic-gate */ 18427c478bd9Sstevel@tonic-gate void 18437c478bd9Sstevel@tonic-gate cpu_surrender(kthread_t *tp) 18447c478bd9Sstevel@tonic-gate { 18457c478bd9Sstevel@tonic-gate cpu_t *cpup; 18467c478bd9Sstevel@tonic-gate int max_pri; 18477c478bd9Sstevel@tonic-gate int max_run_pri; 18487c478bd9Sstevel@tonic-gate klwp_t *lwp; 18497c478bd9Sstevel@tonic-gate 18507c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 18517c478bd9Sstevel@tonic-gate 18527c478bd9Sstevel@tonic-gate if (tp->t_state != TS_ONPROC) 18537c478bd9Sstevel@tonic-gate return; 18547c478bd9Sstevel@tonic-gate cpup = tp->t_disp_queue->disp_cpu; /* CPU thread dispatched to */ 18557c478bd9Sstevel@tonic-gate max_pri = cpup->cpu_disp->disp_maxrunpri; /* best pri of that CPU */ 18567c478bd9Sstevel@tonic-gate max_run_pri = CP_MAXRUNPRI(cpup->cpu_part); 18577c478bd9Sstevel@tonic-gate if (max_pri < max_run_pri) 18587c478bd9Sstevel@tonic-gate max_pri = max_run_pri; 18597c478bd9Sstevel@tonic-gate 18607c478bd9Sstevel@tonic-gate cpup->cpu_runrun = 1; 18617c478bd9Sstevel@tonic-gate if (max_pri >= kpreemptpri && cpup->cpu_kprunrun == 0) { 18627c478bd9Sstevel@tonic-gate cpup->cpu_kprunrun = 1; 18637c478bd9Sstevel@tonic-gate } 18647c478bd9Sstevel@tonic-gate 18657c478bd9Sstevel@tonic-gate /* 18667c478bd9Sstevel@tonic-gate * Propagate cpu_runrun, and cpu_kprunrun to global visibility. 
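 * (The membar_enter() below makes these flag stores globally visible
 * before any poke_cpu() notification is sent.)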
18677c478bd9Sstevel@tonic-gate */ 18687c478bd9Sstevel@tonic-gate membar_enter(); 18697c478bd9Sstevel@tonic-gate 18707c478bd9Sstevel@tonic-gate DTRACE_SCHED1(surrender, kthread_t *, tp); 18717c478bd9Sstevel@tonic-gate 18727c478bd9Sstevel@tonic-gate /* 18737c478bd9Sstevel@tonic-gate * Make the target thread take an excursion through trap() 18747c478bd9Sstevel@tonic-gate * to do preempt() (unless we're already in trap or post_syscall, 18757c478bd9Sstevel@tonic-gate * calling cpu_surrender via CL_TRAPRET). 18767c478bd9Sstevel@tonic-gate */ 18777c478bd9Sstevel@tonic-gate if (tp != curthread || (lwp = tp->t_lwp) == NULL || 18787c478bd9Sstevel@tonic-gate lwp->lwp_state != LWP_USER) { 18797c478bd9Sstevel@tonic-gate aston(tp); 18807c478bd9Sstevel@tonic-gate if (cpup != CPU) 18817c478bd9Sstevel@tonic-gate poke_cpu(cpup->cpu_id); 18827c478bd9Sstevel@tonic-gate } 18837c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_CPU_SURRENDER, 18847c478bd9Sstevel@tonic-gate "cpu_surrender:tid %p cpu %p", tp, cpup); 18857c478bd9Sstevel@tonic-gate } 18867c478bd9Sstevel@tonic-gate 18877c478bd9Sstevel@tonic-gate 18887c478bd9Sstevel@tonic-gate /* 18897c478bd9Sstevel@tonic-gate * Commit to and ratify a scheduling decision 18907c478bd9Sstevel@tonic-gate */ 18917c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 18927c478bd9Sstevel@tonic-gate static kthread_t * 18937c478bd9Sstevel@tonic-gate disp_ratify(kthread_t *tp, disp_t *kpq) 18947c478bd9Sstevel@tonic-gate { 18957c478bd9Sstevel@tonic-gate pri_t tpri, maxpri; 18967c478bd9Sstevel@tonic-gate pri_t maxkpri; 18977c478bd9Sstevel@tonic-gate cpu_t *cpup; 18987c478bd9Sstevel@tonic-gate 18997c478bd9Sstevel@tonic-gate ASSERT(tp != NULL); 19007c478bd9Sstevel@tonic-gate /* 19017c478bd9Sstevel@tonic-gate * Commit to, then ratify scheduling decision 19027c478bd9Sstevel@tonic-gate */ 19037c478bd9Sstevel@tonic-gate cpup = CPU; 19047c478bd9Sstevel@tonic-gate if (cpup->cpu_runrun != 0) 19057c478bd9Sstevel@tonic-gate cpup->cpu_runrun = 0; 19067c478bd9Sstevel@tonic-gate if (cpup->cpu_kprunrun != 0) 19077c478bd9Sstevel@tonic-gate cpup->cpu_kprunrun = 0; 19087c478bd9Sstevel@tonic-gate if (cpup->cpu_chosen_level != -1) 19097c478bd9Sstevel@tonic-gate cpup->cpu_chosen_level = -1; 19107c478bd9Sstevel@tonic-gate membar_enter(); 19117c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 19127c478bd9Sstevel@tonic-gate maxpri = cpup->cpu_disp->disp_maxrunpri; 19137c478bd9Sstevel@tonic-gate maxkpri = kpq->disp_maxrunpri; 19147c478bd9Sstevel@tonic-gate if (maxpri < maxkpri) 19157c478bd9Sstevel@tonic-gate maxpri = maxkpri; 19167c478bd9Sstevel@tonic-gate if (tpri < maxpri) { 19177c478bd9Sstevel@tonic-gate /* 19187c478bd9Sstevel@tonic-gate * should have done better 19197c478bd9Sstevel@tonic-gate * put this one back and indicate to try again 19207c478bd9Sstevel@tonic-gate */ 19217c478bd9Sstevel@tonic-gate cpup->cpu_dispthread = curthread; /* fixup dispthread */ 19227c478bd9Sstevel@tonic-gate cpup->cpu_dispatch_pri = DISP_PRIO(curthread); 19237c478bd9Sstevel@tonic-gate thread_lock_high(tp); 19247c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp); 19257c478bd9Sstevel@tonic-gate setfrontdq(tp); 19267c478bd9Sstevel@tonic-gate thread_unlock_nopreempt(tp); 19277c478bd9Sstevel@tonic-gate 19287c478bd9Sstevel@tonic-gate tp = NULL; 19297c478bd9Sstevel@tonic-gate } 19307c478bd9Sstevel@tonic-gate return (tp); 19317c478bd9Sstevel@tonic-gate } 19327c478bd9Sstevel@tonic-gate 19337c478bd9Sstevel@tonic-gate /* 19347c478bd9Sstevel@tonic-gate * See if there is any work on the dispatcher queue for other CPUs. 
19357c478bd9Sstevel@tonic-gate * If there is, dequeue the best thread and return. 19367c478bd9Sstevel@tonic-gate */ 19377c478bd9Sstevel@tonic-gate static kthread_t * 19387c478bd9Sstevel@tonic-gate disp_getwork(cpu_t *cp) 19397c478bd9Sstevel@tonic-gate { 19407c478bd9Sstevel@tonic-gate cpu_t *ocp; /* other CPU */ 19417c478bd9Sstevel@tonic-gate cpu_t *ocp_start; 19427c478bd9Sstevel@tonic-gate cpu_t *tcp; /* target local CPU */ 19437c478bd9Sstevel@tonic-gate kthread_t *tp; 1944685679f7Sakolb kthread_t *retval = NULL; 19457c478bd9Sstevel@tonic-gate pri_t maxpri; 19467c478bd9Sstevel@tonic-gate disp_t *kpq; /* kp queue for this partition */ 19477c478bd9Sstevel@tonic-gate lpl_t *lpl, *lpl_leaf; 19487c478bd9Sstevel@tonic-gate int hint, leafidx; 1949685679f7Sakolb hrtime_t stealtime; 19507c478bd9Sstevel@tonic-gate 19517c478bd9Sstevel@tonic-gate maxpri = -1; 19527c478bd9Sstevel@tonic-gate tcp = NULL; 19537c478bd9Sstevel@tonic-gate 19547c478bd9Sstevel@tonic-gate kpq = &cp->cpu_part->cp_kp_queue; 19557c478bd9Sstevel@tonic-gate while (kpq->disp_maxrunpri >= 0) { 19567c478bd9Sstevel@tonic-gate /* 19577c478bd9Sstevel@tonic-gate * Try to take a thread from the kp_queue. 19587c478bd9Sstevel@tonic-gate */ 19597c478bd9Sstevel@tonic-gate tp = (disp_getbest(kpq)); 19607c478bd9Sstevel@tonic-gate if (tp) 19617c478bd9Sstevel@tonic-gate return (disp_ratify(tp, kpq)); 19627c478bd9Sstevel@tonic-gate } 19637c478bd9Sstevel@tonic-gate 1964ab761399Sesaxe kpreempt_disable(); /* protect the cpu_active list */ 19657c478bd9Sstevel@tonic-gate 19667c478bd9Sstevel@tonic-gate /* 19677c478bd9Sstevel@tonic-gate * Try to find something to do on another CPU's run queue. 19687c478bd9Sstevel@tonic-gate * Loop through all other CPUs looking for the one with the highest 19697c478bd9Sstevel@tonic-gate * priority unbound thread. 19707c478bd9Sstevel@tonic-gate * 19717c478bd9Sstevel@tonic-gate * On NUMA machines, the partition's CPUs are consulted in order of 19727c478bd9Sstevel@tonic-gate * distance from the current CPU. This way, the first available 19737c478bd9Sstevel@tonic-gate * work found is also the closest, and will suffer the least 19747c478bd9Sstevel@tonic-gate * from being migrated. 19757c478bd9Sstevel@tonic-gate */ 19767c478bd9Sstevel@tonic-gate lpl = lpl_leaf = cp->cpu_lpl; 19777c478bd9Sstevel@tonic-gate hint = leafidx = 0; 19787c478bd9Sstevel@tonic-gate 19797c478bd9Sstevel@tonic-gate /* 19807c478bd9Sstevel@tonic-gate * This loop traverses the lpl hierarchy. 
Higher level lpls represent
19817c478bd9Sstevel@tonic-gate * broader levels of locality
19827c478bd9Sstevel@tonic-gate */
19837c478bd9Sstevel@tonic-gate do {
19847c478bd9Sstevel@tonic-gate /* This loop iterates over the lpl's leaves */
19857c478bd9Sstevel@tonic-gate do {
19867c478bd9Sstevel@tonic-gate if (lpl_leaf != cp->cpu_lpl)
19877c478bd9Sstevel@tonic-gate ocp = lpl_leaf->lpl_cpus;
19887c478bd9Sstevel@tonic-gate else
19897c478bd9Sstevel@tonic-gate ocp = cp->cpu_next_lpl;
19907c478bd9Sstevel@tonic-gate
19917c478bd9Sstevel@tonic-gate /* This loop iterates over the CPUs in the leaf */
19927c478bd9Sstevel@tonic-gate ocp_start = ocp;
19937c478bd9Sstevel@tonic-gate do {
19947c478bd9Sstevel@tonic-gate pri_t pri;
19957c478bd9Sstevel@tonic-gate
19967c478bd9Sstevel@tonic-gate ASSERT(CPU_ACTIVE(ocp));
19977c478bd9Sstevel@tonic-gate
19987c478bd9Sstevel@tonic-gate /*
199939bac370Sesaxe * End our stroll around this lpl if:
20007c478bd9Sstevel@tonic-gate *
20017c478bd9Sstevel@tonic-gate * - Something became runnable on the local
200239bac370Sesaxe * queue...which also ends our stroll around
200339bac370Sesaxe * the partition.
20047c478bd9Sstevel@tonic-gate *
200539bac370Sesaxe * - We happen across another idle CPU.
200639bac370Sesaxe * Since it is patrolling the next portion
200739bac370Sesaxe * of the lpl's list (assuming it's not
200839bac370Sesaxe * halted), move to the next higher level
200939bac370Sesaxe * of locality.
20107c478bd9Sstevel@tonic-gate */
201139bac370Sesaxe if (cp->cpu_disp->disp_nrunnable != 0) {
201239bac370Sesaxe kpreempt_enable();
201339bac370Sesaxe return (NULL);
201439bac370Sesaxe }
20157c478bd9Sstevel@tonic-gate if (ocp->cpu_dispatch_pri == -1) {
20167c478bd9Sstevel@tonic-gate if (ocp->cpu_disp_flags &
20177c478bd9Sstevel@tonic-gate CPU_DISP_HALTED)
20187c478bd9Sstevel@tonic-gate continue;
201939bac370Sesaxe else
20207c478bd9Sstevel@tonic-gate break;
20217c478bd9Sstevel@tonic-gate }
20227c478bd9Sstevel@tonic-gate
20237c478bd9Sstevel@tonic-gate /*
20247c478bd9Sstevel@tonic-gate * If there's only one thread and the CPU
20257c478bd9Sstevel@tonic-gate * is in the middle of a context switch,
20267c478bd9Sstevel@tonic-gate * or it's currently running the idle thread,
20277c478bd9Sstevel@tonic-gate * don't steal it.
20287c478bd9Sstevel@tonic-gate */
20297c478bd9Sstevel@tonic-gate if ((ocp->cpu_disp_flags &
20307c478bd9Sstevel@tonic-gate CPU_DISP_DONTSTEAL) &&
20317c478bd9Sstevel@tonic-gate ocp->cpu_disp->disp_nrunnable == 1)
20327c478bd9Sstevel@tonic-gate continue;
20337c478bd9Sstevel@tonic-gate
20347c478bd9Sstevel@tonic-gate pri = ocp->cpu_disp->disp_max_unbound_pri;
20357c478bd9Sstevel@tonic-gate if (pri > maxpri) {
2036685679f7Sakolb /*
2037685679f7Sakolb * Don't steal threads that we attempted
2038fb2f18f8Sesaxe * to steal recently until they're ready
2039fb2f18f8Sesaxe * to be stolen again.
2040685679f7Sakolb */
2041685679f7Sakolb stealtime = ocp->cpu_disp->disp_steal;
2042685679f7Sakolb if (stealtime == 0 ||
2043685679f7Sakolb stealtime - gethrtime() <= 0) {
20447c478bd9Sstevel@tonic-gate maxpri = pri;
20457c478bd9Sstevel@tonic-gate tcp = ocp;
2046685679f7Sakolb } else {
2047685679f7Sakolb /*
2048685679f7Sakolb * Don't update tcp, just set
2049685679f7Sakolb * the retval to T_DONTSTEAL, so
2050685679f7Sakolb * that if no acceptable CPUs
2051685679f7Sakolb * are found the return value
2052685679f7Sakolb * will be T_DONTSTEAL rather
2053685679f7Sakolb * than NULL.
2054685679f7Sakolb */ 2055685679f7Sakolb retval = T_DONTSTEAL; 2056685679f7Sakolb } 20577c478bd9Sstevel@tonic-gate } 20587c478bd9Sstevel@tonic-gate } while ((ocp = ocp->cpu_next_lpl) != ocp_start); 20597c478bd9Sstevel@tonic-gate 20607c478bd9Sstevel@tonic-gate if ((lpl_leaf = lpl->lpl_rset[++leafidx]) == NULL) { 20617c478bd9Sstevel@tonic-gate leafidx = 0; 20627c478bd9Sstevel@tonic-gate lpl_leaf = lpl->lpl_rset[leafidx]; 20637c478bd9Sstevel@tonic-gate } 20647c478bd9Sstevel@tonic-gate } while (leafidx != hint); 20657c478bd9Sstevel@tonic-gate 20667c478bd9Sstevel@tonic-gate hint = leafidx = lpl->lpl_hint; 20677c478bd9Sstevel@tonic-gate if ((lpl = lpl->lpl_parent) != NULL) 20687c478bd9Sstevel@tonic-gate lpl_leaf = lpl->lpl_rset[hint]; 20697c478bd9Sstevel@tonic-gate } while (!tcp && lpl); 20707c478bd9Sstevel@tonic-gate 2071ab761399Sesaxe kpreempt_enable(); 20727c478bd9Sstevel@tonic-gate 20737c478bd9Sstevel@tonic-gate /* 20747c478bd9Sstevel@tonic-gate * If another queue looks good, and there is still nothing on 20757c478bd9Sstevel@tonic-gate * the local queue, try to transfer one or more threads 20767c478bd9Sstevel@tonic-gate * from it to our queue. 20777c478bd9Sstevel@tonic-gate */ 20787c478bd9Sstevel@tonic-gate if (tcp && cp->cpu_disp->disp_nrunnable == 0) { 2079685679f7Sakolb tp = disp_getbest(tcp->cpu_disp); 2080685679f7Sakolb if (tp == NULL || tp == T_DONTSTEAL) 2081685679f7Sakolb return (tp); 20827c478bd9Sstevel@tonic-gate return (disp_ratify(tp, kpq)); 20837c478bd9Sstevel@tonic-gate } 2084685679f7Sakolb return (retval); 20857c478bd9Sstevel@tonic-gate } 20867c478bd9Sstevel@tonic-gate 20877c478bd9Sstevel@tonic-gate 20887c478bd9Sstevel@tonic-gate /* 20897c478bd9Sstevel@tonic-gate * disp_fix_unbound_pri() 20907c478bd9Sstevel@tonic-gate * Determines the maximum priority of unbound threads on the queue. 20917c478bd9Sstevel@tonic-gate * The priority is kept for the queue, but is only increased, never 20927c478bd9Sstevel@tonic-gate * reduced unless some CPU is looking for something on that queue. 20937c478bd9Sstevel@tonic-gate * 20947c478bd9Sstevel@tonic-gate * The priority argument is the known upper limit. 20957c478bd9Sstevel@tonic-gate * 20967c478bd9Sstevel@tonic-gate * Perhaps this should be kept accurately, but that probably means 20977c478bd9Sstevel@tonic-gate * separate bitmaps for bound and unbound threads. Since only idled 20987c478bd9Sstevel@tonic-gate * CPUs will have to do this recalculation, it seems better this way. 20997c478bd9Sstevel@tonic-gate */ 21007c478bd9Sstevel@tonic-gate static void 21017c478bd9Sstevel@tonic-gate disp_fix_unbound_pri(disp_t *dp, pri_t pri) 21027c478bd9Sstevel@tonic-gate { 21037c478bd9Sstevel@tonic-gate kthread_t *tp; 21047c478bd9Sstevel@tonic-gate dispq_t *dq; 21057c478bd9Sstevel@tonic-gate ulong_t *dqactmap = dp->disp_qactmap; 21067c478bd9Sstevel@tonic-gate ulong_t mapword; 21077c478bd9Sstevel@tonic-gate int wx; 21087c478bd9Sstevel@tonic-gate 21097c478bd9Sstevel@tonic-gate ASSERT(DISP_LOCK_HELD(&dp->disp_lock)); 21107c478bd9Sstevel@tonic-gate 21117c478bd9Sstevel@tonic-gate ASSERT(pri >= 0); /* checked by caller */ 21127c478bd9Sstevel@tonic-gate 21137c478bd9Sstevel@tonic-gate /* 21147c478bd9Sstevel@tonic-gate * Start the search at the next lowest priority below the supplied 21157c478bd9Sstevel@tonic-gate * priority. This depends on the bitmap implementation. 
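 * (pri >> BT_ULSHIFT selects the bitmap word for a priority and BT_BIW()
 * the bit within that word, so masking with BT_BIW(pri) - 1 keeps only
 * the lower priorities that share that word.)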
21167c478bd9Sstevel@tonic-gate */
21177c478bd9Sstevel@tonic-gate do {
21187c478bd9Sstevel@tonic-gate wx = pri >> BT_ULSHIFT; /* index of word in map */
21197c478bd9Sstevel@tonic-gate
21207c478bd9Sstevel@tonic-gate /*
21217c478bd9Sstevel@tonic-gate * Form mask for all lower priorities in the word.
21227c478bd9Sstevel@tonic-gate */
21237c478bd9Sstevel@tonic-gate mapword = dqactmap[wx] & (BT_BIW(pri) - 1);
21247c478bd9Sstevel@tonic-gate
21257c478bd9Sstevel@tonic-gate /*
21267c478bd9Sstevel@tonic-gate * Get next lower active priority.
21277c478bd9Sstevel@tonic-gate */
21287c478bd9Sstevel@tonic-gate if (mapword != 0) {
21297c478bd9Sstevel@tonic-gate pri = (wx << BT_ULSHIFT) + highbit(mapword) - 1;
21307c478bd9Sstevel@tonic-gate } else if (wx > 0) {
21317c478bd9Sstevel@tonic-gate pri = bt_gethighbit(dqactmap, wx - 1); /* sign extend */
21327c478bd9Sstevel@tonic-gate if (pri < 0)
21337c478bd9Sstevel@tonic-gate break;
21347c478bd9Sstevel@tonic-gate } else {
21357c478bd9Sstevel@tonic-gate pri = -1;
21367c478bd9Sstevel@tonic-gate break;
21377c478bd9Sstevel@tonic-gate }
21387c478bd9Sstevel@tonic-gate
21397c478bd9Sstevel@tonic-gate /*
21407c478bd9Sstevel@tonic-gate * Search the queue for unbound, runnable threads.
21417c478bd9Sstevel@tonic-gate */
21427c478bd9Sstevel@tonic-gate dq = &dp->disp_q[pri];
21437c478bd9Sstevel@tonic-gate tp = dq->dq_first;
21447c478bd9Sstevel@tonic-gate
21457c478bd9Sstevel@tonic-gate while (tp && (tp->t_bound_cpu || tp->t_weakbound_cpu)) {
21467c478bd9Sstevel@tonic-gate tp = tp->t_link;
21477c478bd9Sstevel@tonic-gate }
21487c478bd9Sstevel@tonic-gate
21497c478bd9Sstevel@tonic-gate /*
21507c478bd9Sstevel@tonic-gate * If a thread was found, set the priority and return.
21517c478bd9Sstevel@tonic-gate */
21527c478bd9Sstevel@tonic-gate } while (tp == NULL);
21537c478bd9Sstevel@tonic-gate
21547c478bd9Sstevel@tonic-gate /*
21557c478bd9Sstevel@tonic-gate * pri holds the maximum unbound thread priority or -1.
21567c478bd9Sstevel@tonic-gate */
21577c478bd9Sstevel@tonic-gate if (dp->disp_max_unbound_pri != pri)
21587c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = pri;
21597c478bd9Sstevel@tonic-gate }
21607c478bd9Sstevel@tonic-gate
21617c478bd9Sstevel@tonic-gate /*
21627c478bd9Sstevel@tonic-gate * disp_adjust_unbound_pri() - thread is becoming unbound, so we should
21637c478bd9Sstevel@tonic-gate * check if the CPU to which it was previously bound should have
21647c478bd9Sstevel@tonic-gate * its disp_max_unbound_pri increased.
21657c478bd9Sstevel@tonic-gate */
21667c478bd9Sstevel@tonic-gate void
21677c478bd9Sstevel@tonic-gate disp_adjust_unbound_pri(kthread_t *tp)
21687c478bd9Sstevel@tonic-gate {
21697c478bd9Sstevel@tonic-gate disp_t *dp;
21707c478bd9Sstevel@tonic-gate pri_t tpri;
21717c478bd9Sstevel@tonic-gate
21727c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp));
21737c478bd9Sstevel@tonic-gate
21747c478bd9Sstevel@tonic-gate /*
21757c478bd9Sstevel@tonic-gate * Don't do anything if the thread is not bound, or
21767c478bd9Sstevel@tonic-gate * currently not runnable or swapped out.
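 * (Bound threads never raise disp_max_unbound_pri when they are enqueued,
 * so when the binding is dropped the value on the formerly bound CPU may
 * be too low and only ever needs to be raised here.)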
21777c478bd9Sstevel@tonic-gate */
21787c478bd9Sstevel@tonic-gate if (tp->t_bound_cpu == NULL ||
21797c478bd9Sstevel@tonic-gate tp->t_state != TS_RUN ||
21807c478bd9Sstevel@tonic-gate tp->t_schedflag & TS_ON_SWAPQ)
21817c478bd9Sstevel@tonic-gate return;
21827c478bd9Sstevel@tonic-gate
21837c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp);
21847c478bd9Sstevel@tonic-gate dp = tp->t_bound_cpu->cpu_disp;
21857c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri);
21867c478bd9Sstevel@tonic-gate if (tpri > dp->disp_max_unbound_pri)
21877c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri;
21887c478bd9Sstevel@tonic-gate }
21897c478bd9Sstevel@tonic-gate
21907c478bd9Sstevel@tonic-gate /*
2191685679f7Sakolb * disp_getbest()
2192685679f7Sakolb * De-queue the highest priority unbound runnable thread.
2193685679f7Sakolb * Returns with the thread unlocked and onproc but at splhigh (like disp()).
2194685679f7Sakolb * Returns NULL if nothing found.
2195685679f7Sakolb * Returns T_DONTSTEAL if the thread was not stealable,
2196685679f7Sakolb * so that the caller will try again later.
21977c478bd9Sstevel@tonic-gate *
2198685679f7Sakolb * Passed a pointer to a dispatch queue that is not associated with
2199685679f7Sakolb * this CPU.
22007c478bd9Sstevel@tonic-gate */
22017c478bd9Sstevel@tonic-gate static kthread_t *
22027c478bd9Sstevel@tonic-gate disp_getbest(disp_t *dp)
22037c478bd9Sstevel@tonic-gate {
22047c478bd9Sstevel@tonic-gate kthread_t *tp;
22057c478bd9Sstevel@tonic-gate dispq_t *dq;
22067c478bd9Sstevel@tonic-gate pri_t pri;
2207685679f7Sakolb cpu_t *cp, *tcp;
2208685679f7Sakolb boolean_t allbound;
22097c478bd9Sstevel@tonic-gate
22107c478bd9Sstevel@tonic-gate disp_lock_enter(&dp->disp_lock);
22117c478bd9Sstevel@tonic-gate
22127c478bd9Sstevel@tonic-gate /*
22137c478bd9Sstevel@tonic-gate * If there is nothing to run, or the CPU is in the middle of a
22147c478bd9Sstevel@tonic-gate * context switch of the only thread, return NULL.
22157c478bd9Sstevel@tonic-gate */
2216685679f7Sakolb tcp = dp->disp_cpu;
2217685679f7Sakolb cp = CPU;
22187c478bd9Sstevel@tonic-gate pri = dp->disp_max_unbound_pri;
22197c478bd9Sstevel@tonic-gate if (pri == -1 ||
2220685679f7Sakolb (tcp != NULL && (tcp->cpu_disp_flags & CPU_DISP_DONTSTEAL) &&
2221685679f7Sakolb tcp->cpu_disp->disp_nrunnable == 1)) {
22227c478bd9Sstevel@tonic-gate disp_lock_exit_nopreempt(&dp->disp_lock);
22237c478bd9Sstevel@tonic-gate return (NULL);
22247c478bd9Sstevel@tonic-gate }
22257c478bd9Sstevel@tonic-gate
22267c478bd9Sstevel@tonic-gate dq = &dp->disp_q[pri];
2227685679f7Sakolb
22287c478bd9Sstevel@tonic-gate
22297c478bd9Sstevel@tonic-gate /*
2230685679f7Sakolb * Assume that all threads are bound on this queue, and change it
2231685679f7Sakolb * later when we find out that it is not the case.
22327c478bd9Sstevel@tonic-gate */
2233685679f7Sakolb allbound = B_TRUE;
2234685679f7Sakolb for (tp = dq->dq_first; tp != NULL; tp = tp->t_link) {
2235685679f7Sakolb hrtime_t now, nosteal, rqtime;
2236685679f7Sakolb
2237685679f7Sakolb /*
2238685679f7Sakolb * Skip over bound threads which could be here even
2239685679f7Sakolb * though disp_max_unbound_pri indicated this level.
2240685679f7Sakolb */
2241685679f7Sakolb if (tp->t_bound_cpu || tp->t_weakbound_cpu)
2242685679f7Sakolb continue;
2243685679f7Sakolb
2244685679f7Sakolb /*
2245685679f7Sakolb * We've got some unbound threads on this queue, so turn
2246685679f7Sakolb * the allbound flag off now.
2247685679f7Sakolb */ 2248685679f7Sakolb allbound = B_FALSE; 2249685679f7Sakolb 2250685679f7Sakolb /* 2251685679f7Sakolb * The thread is a candidate for stealing from its run queue. We 2252685679f7Sakolb * don't want to steal threads that became runnable just a 2253685679f7Sakolb * moment ago. This improves CPU affinity for threads that get 2254685679f7Sakolb * preempted for short periods of time and go back on the run 2255685679f7Sakolb * queue. 2256685679f7Sakolb * 2257685679f7Sakolb * We want to let it stay on its run queue if it was only placed 2258685679f7Sakolb * there recently and it was running on the same CPU before that 2259685679f7Sakolb * to preserve its cache investment. For the thread to remain on 2260685679f7Sakolb * its run queue, ALL of the following conditions must be 2261685679f7Sakolb * satisfied: 2262685679f7Sakolb * 2263685679f7Sakolb * - the disp queue should not be the kernel preemption queue 2264685679f7Sakolb * - delayed idle stealing should not be disabled 2265685679f7Sakolb * - nosteal_nsec should be non-zero 2266685679f7Sakolb * - it should run with user priority 2267685679f7Sakolb * - it should be on the run queue of the CPU where it was 2268685679f7Sakolb * running before being placed on the run queue 2269685679f7Sakolb * - it should be the only thread on the run queue (to prevent 2270685679f7Sakolb * extra scheduling latency for other threads) 2271685679f7Sakolb * - it should sit on the run queue for less than per-chip 2272685679f7Sakolb * nosteal interval or global nosteal interval 2273685679f7Sakolb * - in case of CPUs with shared cache it should sit in a run 2274685679f7Sakolb * queue of a CPU from a different chip 2275685679f7Sakolb * 2276685679f7Sakolb * The checks are arranged so that the ones that are faster are 2277685679f7Sakolb * placed earlier. 2278685679f7Sakolb */ 2279685679f7Sakolb if (tcp == NULL || 2280685679f7Sakolb pri >= minclsyspri || 2281685679f7Sakolb tp->t_cpu != tcp) 2282685679f7Sakolb break; 2283685679f7Sakolb 2284685679f7Sakolb /* 2285fb2f18f8Sesaxe * Steal immediately if, due to CMT processor architecture, 2286fb2f18f8Sesaxe * migration between cp and tcp would incur no performance 2287fb2f18f8Sesaxe * penalty. 2288685679f7Sakolb */ 2289fb2f18f8Sesaxe if (pg_cmt_can_migrate(cp, tcp)) 2290685679f7Sakolb break; 2291685679f7Sakolb 2292fb2f18f8Sesaxe nosteal = nosteal_nsec; 2293fb2f18f8Sesaxe if (nosteal == 0) 2294685679f7Sakolb break; 2295685679f7Sakolb 2296685679f7Sakolb /* 2297685679f7Sakolb * Calculate time spent sitting on run queue 2298685679f7Sakolb */ 2299685679f7Sakolb now = gethrtime_unscaled(); 2300685679f7Sakolb rqtime = now - tp->t_waitrq; 2301685679f7Sakolb scalehrtime(&rqtime); 2302685679f7Sakolb 2303685679f7Sakolb /* 2304685679f7Sakolb * Steal immediately if the time spent on this run queue is more 2305685679f7Sakolb * than the allowed nosteal delay. 2306685679f7Sakolb * 2307685679f7Sakolb * Negative rqtime check is needed here to avoid infinite 2308685679f7Sakolb * stealing delays caused by unlikely but not impossible 2309685679f7Sakolb * drifts between CPU times on different CPUs.
2310685679f7Sakolb */ 2311685679f7Sakolb if (rqtime > nosteal || rqtime < 0) 2312685679f7Sakolb break; 2313685679f7Sakolb 2314685679f7Sakolb DTRACE_PROBE4(nosteal, kthread_t *, tp, 2315685679f7Sakolb cpu_t *, tcp, cpu_t *, cp, hrtime_t, rqtime); 2316685679f7Sakolb scalehrtime(&now); 2317685679f7Sakolb /* 2318685679f7Sakolb * Calculate when this thread becomes stealable 2319685679f7Sakolb */ 2320685679f7Sakolb now += (nosteal - rqtime); 2321685679f7Sakolb 2322685679f7Sakolb /* 2323685679f7Sakolb * Calculate time when some thread becomes stealable 2324685679f7Sakolb */ 2325685679f7Sakolb if (now < dp->disp_steal) 2326685679f7Sakolb dp->disp_steal = now; 23277c478bd9Sstevel@tonic-gate } 23287c478bd9Sstevel@tonic-gate 23297c478bd9Sstevel@tonic-gate /* 23307c478bd9Sstevel@tonic-gate * If there were no unbound threads on this queue, find the queue 2331685679f7Sakolb * where they are and then return later. The value of 2332685679f7Sakolb * disp_max_unbound_pri is not always accurate because it isn't 2333685679f7Sakolb * reduced until another idle CPU looks for work. 2334685679f7Sakolb */ 2335685679f7Sakolb if (allbound) 2336685679f7Sakolb disp_fix_unbound_pri(dp, pri); 2337685679f7Sakolb 2338685679f7Sakolb /* 2339685679f7Sakolb * If we reached the end of the queue and found no unbound threads 2340685679f7Sakolb * then return NULL so that other CPUs will be considered. If there 2341685679f7Sakolb * are unbound threads but they cannot yet be stolen, then 2342685679f7Sakolb * return T_DONTSTEAL and try again later. 23437c478bd9Sstevel@tonic-gate */ 23447c478bd9Sstevel@tonic-gate if (tp == NULL) { 23457c478bd9Sstevel@tonic-gate disp_lock_exit_nopreempt(&dp->disp_lock); 2346685679f7Sakolb return (allbound ? NULL : T_DONTSTEAL); 23477c478bd9Sstevel@tonic-gate } 23487c478bd9Sstevel@tonic-gate 23497c478bd9Sstevel@tonic-gate /* 23507c478bd9Sstevel@tonic-gate * Found a runnable, unbound thread, so remove it from queue. 23517c478bd9Sstevel@tonic-gate * dispdeq() requires that we have the thread locked, and we do, 23527c478bd9Sstevel@tonic-gate * by virtue of holding the dispatch queue lock. dispdeq() will 23537c478bd9Sstevel@tonic-gate * put the thread in transition state, thereby dropping the dispq 23547c478bd9Sstevel@tonic-gate * lock. 23557c478bd9Sstevel@tonic-gate */ 2356685679f7Sakolb 23577c478bd9Sstevel@tonic-gate #ifdef DEBUG 23587c478bd9Sstevel@tonic-gate { 23597c478bd9Sstevel@tonic-gate int thread_was_on_queue; 23607c478bd9Sstevel@tonic-gate 23617c478bd9Sstevel@tonic-gate thread_was_on_queue = dispdeq(tp); /* drops disp_lock */ 23627c478bd9Sstevel@tonic-gate ASSERT(thread_was_on_queue); 23637c478bd9Sstevel@tonic-gate } 2364685679f7Sakolb 23657c478bd9Sstevel@tonic-gate #else /* DEBUG */ 23667c478bd9Sstevel@tonic-gate (void) dispdeq(tp); /* drops disp_lock */ 23677c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 23687c478bd9Sstevel@tonic-gate 2369685679f7Sakolb /* 2370685679f7Sakolb * Reset the disp_queue steal time - we do not know what the smallest 2371685679f7Sakolb * value across the queue is. 2372685679f7Sakolb */ 2373685679f7Sakolb dp->disp_steal = 0; 2374685679f7Sakolb 23757c478bd9Sstevel@tonic-gate tp->t_schedflag |= TS_DONT_SWAP; 23767c478bd9Sstevel@tonic-gate 23777c478bd9Sstevel@tonic-gate /* 23787c478bd9Sstevel@tonic-gate * Set up thread to run on the current CPU.
23797c478bd9Sstevel@tonic-gate */ 23807c478bd9Sstevel@tonic-gate tp->t_disp_queue = cp->cpu_disp; 23817c478bd9Sstevel@tonic-gate 23827c478bd9Sstevel@tonic-gate cp->cpu_dispthread = tp; /* protected by spl only */ 23837c478bd9Sstevel@tonic-gate cp->cpu_dispatch_pri = pri; 23840f500aa6Sbpramod 23850f500aa6Sbpramod /* 23860f500aa6Sbpramod * There can be a memory synchronization race between disp_getbest() 23870f500aa6Sbpramod * and disp_ratify() vs cpu_resched() where cpu_resched() is trying 23880f500aa6Sbpramod * to preempt the current thread to run the enqueued thread while 23890f500aa6Sbpramod * disp_getbest() and disp_ratify() are changing the current thread 23900f500aa6Sbpramod * to the stolen thread. This may lead to a situation where 23910f500aa6Sbpramod * cpu_resched() tries to preempt the wrong thread and the 23920f500aa6Sbpramod * stolen thread continues to run on the CPU which has been tagged 23930f500aa6Sbpramod * for preemption. 23940f500aa6Sbpramod * Later the clock thread gets enqueued but doesn't get to run on the 23950f500aa6Sbpramod * CPU causing the system to hang. 23960f500aa6Sbpramod * 23970f500aa6Sbpramod * To avoid this, grabbing and dropping the disp_lock (which does 23980f500aa6Sbpramod * a memory barrier) is needed to synchronize the execution of 23990f500aa6Sbpramod * cpu_resched() with disp_getbest() and disp_ratify() and 24000f500aa6Sbpramod * synchronize the memory read and written by cpu_resched(), 24010f500aa6Sbpramod * disp_getbest(), and disp_ratify() with each other. 24020f500aa6Sbpramod * (see CR#6482861 for more details). 24030f500aa6Sbpramod */ 24040f500aa6Sbpramod disp_lock_enter_high(&cp->cpu_disp->disp_lock); 24050f500aa6Sbpramod disp_lock_exit_high(&cp->cpu_disp->disp_lock); 24060f500aa6Sbpramod 24077c478bd9Sstevel@tonic-gate ASSERT(pri == DISP_PRIO(tp)); 24087c478bd9Sstevel@tonic-gate 2409685679f7Sakolb DTRACE_PROBE3(steal, kthread_t *, tp, cpu_t *, tcp, cpu_t *, cp); 2410685679f7Sakolb 24117c478bd9Sstevel@tonic-gate thread_onproc(tp, cp); /* set t_state to TS_ONPROC */ 24127c478bd9Sstevel@tonic-gate 24137c478bd9Sstevel@tonic-gate /* 24147c478bd9Sstevel@tonic-gate * Return with spl high so that swtch() won't need to raise it. 24157c478bd9Sstevel@tonic-gate * The disp_lock was dropped by dispdeq(). 24167c478bd9Sstevel@tonic-gate */ 24177c478bd9Sstevel@tonic-gate 24187c478bd9Sstevel@tonic-gate return (tp); 24197c478bd9Sstevel@tonic-gate } 24207c478bd9Sstevel@tonic-gate 24217c478bd9Sstevel@tonic-gate /* 24227c478bd9Sstevel@tonic-gate * disp_bound_common() - common routine for higher level functions 24237c478bd9Sstevel@tonic-gate * that check for bound threads under certain conditions. 24247c478bd9Sstevel@tonic-gate * If 'threadlistsafe' is set then there is no need to acquire 24257c478bd9Sstevel@tonic-gate * pidlock to stop the thread list from changing (eg, if 24267c478bd9Sstevel@tonic-gate * disp_bound_* is called with cpus paused). 
24277c478bd9Sstevel@tonic-gate */ 24287c478bd9Sstevel@tonic-gate static int 24297c478bd9Sstevel@tonic-gate disp_bound_common(cpu_t *cp, int threadlistsafe, int flag) 24307c478bd9Sstevel@tonic-gate { 24317c478bd9Sstevel@tonic-gate int found = 0; 24327c478bd9Sstevel@tonic-gate kthread_t *tp; 24337c478bd9Sstevel@tonic-gate 24347c478bd9Sstevel@tonic-gate ASSERT(flag); 24357c478bd9Sstevel@tonic-gate 24367c478bd9Sstevel@tonic-gate if (!threadlistsafe) 24377c478bd9Sstevel@tonic-gate mutex_enter(&pidlock); 24387c478bd9Sstevel@tonic-gate tp = curthread; /* faster than allthreads */ 24397c478bd9Sstevel@tonic-gate do { 24407c478bd9Sstevel@tonic-gate if (tp->t_state != TS_FREE) { 24417c478bd9Sstevel@tonic-gate /* 24427c478bd9Sstevel@tonic-gate * If an interrupt thread is busy, but the 24437c478bd9Sstevel@tonic-gate * caller doesn't care (i.e. BOUND_INTR is off), 24447c478bd9Sstevel@tonic-gate * then just ignore it and continue through. 24457c478bd9Sstevel@tonic-gate */ 24467c478bd9Sstevel@tonic-gate if ((tp->t_flag & T_INTR_THREAD) && 24477c478bd9Sstevel@tonic-gate !(flag & BOUND_INTR)) 24487c478bd9Sstevel@tonic-gate continue; 24497c478bd9Sstevel@tonic-gate 24507c478bd9Sstevel@tonic-gate /* 24517c478bd9Sstevel@tonic-gate * Skip the idle thread for the CPU 24527c478bd9Sstevel@tonic-gate * we're about to set offline. 24537c478bd9Sstevel@tonic-gate */ 24547c478bd9Sstevel@tonic-gate if (tp == cp->cpu_idle_thread) 24557c478bd9Sstevel@tonic-gate continue; 24567c478bd9Sstevel@tonic-gate 24577c478bd9Sstevel@tonic-gate /* 24587c478bd9Sstevel@tonic-gate * Skip the pause thread for the CPU 24597c478bd9Sstevel@tonic-gate * we're about to set offline. 24607c478bd9Sstevel@tonic-gate */ 24617c478bd9Sstevel@tonic-gate if (tp == cp->cpu_pause_thread) 24627c478bd9Sstevel@tonic-gate continue; 24637c478bd9Sstevel@tonic-gate 24647c478bd9Sstevel@tonic-gate if ((flag & BOUND_CPU) && 24657c478bd9Sstevel@tonic-gate (tp->t_bound_cpu == cp || 24667c478bd9Sstevel@tonic-gate tp->t_bind_cpu == cp->cpu_id || 24677c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu == cp)) { 24687c478bd9Sstevel@tonic-gate found = 1; 24697c478bd9Sstevel@tonic-gate break; 24707c478bd9Sstevel@tonic-gate } 24717c478bd9Sstevel@tonic-gate 24727c478bd9Sstevel@tonic-gate if ((flag & BOUND_PARTITION) && 24737c478bd9Sstevel@tonic-gate (tp->t_cpupart == cp->cpu_part)) { 24747c478bd9Sstevel@tonic-gate found = 1; 24757c478bd9Sstevel@tonic-gate break; 24767c478bd9Sstevel@tonic-gate } 24777c478bd9Sstevel@tonic-gate } 24787c478bd9Sstevel@tonic-gate } while ((tp = tp->t_next) != curthread && found == 0); 24797c478bd9Sstevel@tonic-gate if (!threadlistsafe) 24807c478bd9Sstevel@tonic-gate mutex_exit(&pidlock); 24817c478bd9Sstevel@tonic-gate return (found); 24827c478bd9Sstevel@tonic-gate } 24837c478bd9Sstevel@tonic-gate 24847c478bd9Sstevel@tonic-gate /* 24857c478bd9Sstevel@tonic-gate * disp_bound_threads - return nonzero if threads are bound to the processor. 24867c478bd9Sstevel@tonic-gate * Called infrequently. Keep this simple. 24877c478bd9Sstevel@tonic-gate * Includes threads that are asleep or stopped but not onproc. 
24887c478bd9Sstevel@tonic-gate */ 24897c478bd9Sstevel@tonic-gate int 24907c478bd9Sstevel@tonic-gate disp_bound_threads(cpu_t *cp, int threadlistsafe) 24917c478bd9Sstevel@tonic-gate { 24927c478bd9Sstevel@tonic-gate return (disp_bound_common(cp, threadlistsafe, BOUND_CPU)); 24937c478bd9Sstevel@tonic-gate } 24947c478bd9Sstevel@tonic-gate 24957c478bd9Sstevel@tonic-gate /* 24967c478bd9Sstevel@tonic-gate * disp_bound_anythreads - return nonzero if _any_ threads are bound 24977c478bd9Sstevel@tonic-gate * to the given processor, including interrupt threads. 24987c478bd9Sstevel@tonic-gate */ 24997c478bd9Sstevel@tonic-gate int 25007c478bd9Sstevel@tonic-gate disp_bound_anythreads(cpu_t *cp, int threadlistsafe) 25017c478bd9Sstevel@tonic-gate { 25027c478bd9Sstevel@tonic-gate return (disp_bound_common(cp, threadlistsafe, BOUND_CPU | BOUND_INTR)); 25037c478bd9Sstevel@tonic-gate } 25047c478bd9Sstevel@tonic-gate 25057c478bd9Sstevel@tonic-gate /* 25067c478bd9Sstevel@tonic-gate * disp_bound_partition - return nonzero if threads are bound to the same 25077c478bd9Sstevel@tonic-gate * partition as the processor. 25087c478bd9Sstevel@tonic-gate * Called infrequently. Keep this simple. 25097c478bd9Sstevel@tonic-gate * Includes threads that are asleep or stopped but not onproc. 25107c478bd9Sstevel@tonic-gate */ 25117c478bd9Sstevel@tonic-gate int 25127c478bd9Sstevel@tonic-gate disp_bound_partition(cpu_t *cp, int threadlistsafe) 25137c478bd9Sstevel@tonic-gate { 25147c478bd9Sstevel@tonic-gate return (disp_bound_common(cp, threadlistsafe, BOUND_PARTITION)); 25157c478bd9Sstevel@tonic-gate } 25167c478bd9Sstevel@tonic-gate 25177c478bd9Sstevel@tonic-gate /* 25187c478bd9Sstevel@tonic-gate * disp_cpu_inactive - make a CPU inactive by moving all of its unbound 25197c478bd9Sstevel@tonic-gate * threads to other CPUs. 25207c478bd9Sstevel@tonic-gate */ 25217c478bd9Sstevel@tonic-gate void 25227c478bd9Sstevel@tonic-gate disp_cpu_inactive(cpu_t *cp) 25237c478bd9Sstevel@tonic-gate { 25247c478bd9Sstevel@tonic-gate kthread_t *tp; 25257c478bd9Sstevel@tonic-gate disp_t *dp = cp->cpu_disp; 25267c478bd9Sstevel@tonic-gate dispq_t *dq; 25277c478bd9Sstevel@tonic-gate pri_t pri; 25287c478bd9Sstevel@tonic-gate int wasonq; 25297c478bd9Sstevel@tonic-gate 25307c478bd9Sstevel@tonic-gate disp_lock_enter(&dp->disp_lock); 25317c478bd9Sstevel@tonic-gate while ((pri = dp->disp_max_unbound_pri) != -1) { 25327c478bd9Sstevel@tonic-gate dq = &dp->disp_q[pri]; 25337c478bd9Sstevel@tonic-gate tp = dq->dq_first; 25347c478bd9Sstevel@tonic-gate 25357c478bd9Sstevel@tonic-gate /* 25367c478bd9Sstevel@tonic-gate * Skip over bound threads. 
25377c478bd9Sstevel@tonic-gate */ 25387c478bd9Sstevel@tonic-gate while (tp != NULL && tp->t_bound_cpu != NULL) { 25397c478bd9Sstevel@tonic-gate tp = tp->t_link; 25407c478bd9Sstevel@tonic-gate } 25417c478bd9Sstevel@tonic-gate 25427c478bd9Sstevel@tonic-gate if (tp == NULL) { 25437c478bd9Sstevel@tonic-gate /* disp_max_unbound_pri must be inaccurate, so fix it */ 25447c478bd9Sstevel@tonic-gate disp_fix_unbound_pri(dp, pri); 25457c478bd9Sstevel@tonic-gate continue; 25467c478bd9Sstevel@tonic-gate } 25477c478bd9Sstevel@tonic-gate 25487c478bd9Sstevel@tonic-gate wasonq = dispdeq(tp); /* drops disp_lock */ 25497c478bd9Sstevel@tonic-gate ASSERT(wasonq); 25507c478bd9Sstevel@tonic-gate ASSERT(tp->t_weakbound_cpu == NULL); 25517c478bd9Sstevel@tonic-gate 25527c478bd9Sstevel@tonic-gate setbackdq(tp); 25537c478bd9Sstevel@tonic-gate /* 25547c478bd9Sstevel@tonic-gate * Called from cpu_offline: 25557c478bd9Sstevel@tonic-gate * 25567c478bd9Sstevel@tonic-gate * cp has already been removed from the list of active cpus 25577c478bd9Sstevel@tonic-gate * and tp->t_cpu has been changed so there is no risk of 25587c478bd9Sstevel@tonic-gate * tp ending up back on cp. 25597c478bd9Sstevel@tonic-gate * 25607c478bd9Sstevel@tonic-gate * Called from cpupart_move_cpu: 25617c478bd9Sstevel@tonic-gate * 25627c478bd9Sstevel@tonic-gate * The cpu has moved to a new cpupart. Any threads that 25637c478bd9Sstevel@tonic-gate * were on its dispatch queues before the move remain 25647c478bd9Sstevel@tonic-gate * in the old partition and can't run in the new partition. 25657c478bd9Sstevel@tonic-gate */ 25667c478bd9Sstevel@tonic-gate ASSERT(tp->t_cpu != cp); 25677c478bd9Sstevel@tonic-gate thread_unlock(tp); 25687c478bd9Sstevel@tonic-gate 25697c478bd9Sstevel@tonic-gate disp_lock_enter(&dp->disp_lock); 25707c478bd9Sstevel@tonic-gate } 25717c478bd9Sstevel@tonic-gate disp_lock_exit(&dp->disp_lock); 25727c478bd9Sstevel@tonic-gate } 25737c478bd9Sstevel@tonic-gate 25747c478bd9Sstevel@tonic-gate /* 25757c478bd9Sstevel@tonic-gate * disp_lowpri_cpu - find CPU running the lowest priority thread. 25767c478bd9Sstevel@tonic-gate * The hint passed in is used as a starting point so we don't favor 25777c478bd9Sstevel@tonic-gate * CPU 0 or any other CPU. The caller should pass in the most recently 25787c478bd9Sstevel@tonic-gate * used CPU for the thread. 25797c478bd9Sstevel@tonic-gate * 25807c478bd9Sstevel@tonic-gate * The lgroup and priority are used to determine the best CPU to run on 25817c478bd9Sstevel@tonic-gate * in a NUMA machine. The lgroup specifies which CPUs are closest while 25827c478bd9Sstevel@tonic-gate * the thread priority will indicate whether the thread will actually run 25837c478bd9Sstevel@tonic-gate * there. To pick the best CPU, the CPUs inside and outside of the given 25847c478bd9Sstevel@tonic-gate * lgroup which are running the lowest priority threads are found. The 25857c478bd9Sstevel@tonic-gate * remote CPU is chosen only if the thread will not run locally on a CPU 25867c478bd9Sstevel@tonic-gate * within the lgroup, but will run on the remote CPU. If the thread 25877c478bd9Sstevel@tonic-gate * cannot immediately run on any CPU, the best local CPU will be chosen. 25887c478bd9Sstevel@tonic-gate * 25897c478bd9Sstevel@tonic-gate * The lpl specified also identifies the cpu partition from which 25907c478bd9Sstevel@tonic-gate * disp_lowpri_cpu should select a CPU.
25917c478bd9Sstevel@tonic-gate * 25927c478bd9Sstevel@tonic-gate * curcpu is used to indicate that disp_lowpri_cpu is being called on 25937c478bd9Sstevel@tonic-gate * behalf of the current thread. (curthread is looking for a new cpu) 25947c478bd9Sstevel@tonic-gate * In this case, cpu_dispatch_pri for this thread's cpu should be 25957c478bd9Sstevel@tonic-gate * ignored. 25967c478bd9Sstevel@tonic-gate * 25977c478bd9Sstevel@tonic-gate * If a cpu is the target of an offline request then try to avoid it. 25987c478bd9Sstevel@tonic-gate * 25997c478bd9Sstevel@tonic-gate * This function must be called at either high SPL, or with preemption 26007c478bd9Sstevel@tonic-gate * disabled, so that the "hint" CPU cannot be removed from the online 26017c478bd9Sstevel@tonic-gate * CPU list while we are traversing it. 26027c478bd9Sstevel@tonic-gate */ 26037c478bd9Sstevel@tonic-gate cpu_t * 26047c478bd9Sstevel@tonic-gate disp_lowpri_cpu(cpu_t *hint, lpl_t *lpl, pri_t tpri, cpu_t *curcpu) 26057c478bd9Sstevel@tonic-gate { 26067c478bd9Sstevel@tonic-gate cpu_t *bestcpu; 26077c478bd9Sstevel@tonic-gate cpu_t *besthomecpu; 26087c478bd9Sstevel@tonic-gate cpu_t *cp, *cpstart; 26097c478bd9Sstevel@tonic-gate 26107c478bd9Sstevel@tonic-gate pri_t bestpri; 26117c478bd9Sstevel@tonic-gate pri_t cpupri; 26127c478bd9Sstevel@tonic-gate 26137c478bd9Sstevel@tonic-gate klgrpset_t done; 26147c478bd9Sstevel@tonic-gate klgrpset_t cur_set; 26157c478bd9Sstevel@tonic-gate 26167c478bd9Sstevel@tonic-gate lpl_t *lpl_iter, *lpl_leaf; 26177c478bd9Sstevel@tonic-gate int i; 26187c478bd9Sstevel@tonic-gate 26197c478bd9Sstevel@tonic-gate /* 26207c478bd9Sstevel@tonic-gate * Scan for a CPU currently running the lowest priority thread. 26217c478bd9Sstevel@tonic-gate * Cannot get cpu_lock here because it is adaptive. 26227c478bd9Sstevel@tonic-gate * We do not require lock on CPU list. 26237c478bd9Sstevel@tonic-gate */ 26247c478bd9Sstevel@tonic-gate ASSERT(hint != NULL); 26257c478bd9Sstevel@tonic-gate ASSERT(lpl != NULL); 26267c478bd9Sstevel@tonic-gate ASSERT(lpl->lpl_ncpu > 0); 26277c478bd9Sstevel@tonic-gate 26287c478bd9Sstevel@tonic-gate /* 26297c478bd9Sstevel@tonic-gate * First examine local CPUs. Note that it's possible the hint CPU 26307c478bd9Sstevel@tonic-gate * passed in is remote to the specified home lgroup. If our priority 26317c478bd9Sstevel@tonic-gate * isn't high enough that we can run immediately at home, 26327c478bd9Sstevel@tonic-gate * then examine CPUs remote to our home lgroup. 26337c478bd9Sstevel@tonic-gate * We would like to give preference to CPUs closest to "home". 26347c478bd9Sstevel@tonic-gate * If we can't find a CPU where we'll run at a given level 26357c478bd9Sstevel@tonic-gate * of locality, we expand our search to include the next level.
26367c478bd9Sstevel@tonic-gate */ 26377c478bd9Sstevel@tonic-gate bestcpu = besthomecpu = NULL; 26387c478bd9Sstevel@tonic-gate klgrpset_clear(done); 26397c478bd9Sstevel@tonic-gate /* start with lpl we were passed */ 26407c478bd9Sstevel@tonic-gate 26417c478bd9Sstevel@tonic-gate lpl_iter = lpl; 26427c478bd9Sstevel@tonic-gate 26437c478bd9Sstevel@tonic-gate do { 26447c478bd9Sstevel@tonic-gate 26457c478bd9Sstevel@tonic-gate bestpri = SHRT_MAX; 26467c478bd9Sstevel@tonic-gate klgrpset_clear(cur_set); 26477c478bd9Sstevel@tonic-gate 26487c478bd9Sstevel@tonic-gate for (i = 0; i < lpl_iter->lpl_nrset; i++) { 26497c478bd9Sstevel@tonic-gate lpl_leaf = lpl_iter->lpl_rset[i]; 26507c478bd9Sstevel@tonic-gate if (klgrpset_ismember(done, lpl_leaf->lpl_lgrpid)) 26517c478bd9Sstevel@tonic-gate continue; 26527c478bd9Sstevel@tonic-gate 26537c478bd9Sstevel@tonic-gate klgrpset_add(cur_set, lpl_leaf->lpl_lgrpid); 26547c478bd9Sstevel@tonic-gate 26557c478bd9Sstevel@tonic-gate if (hint->cpu_lpl == lpl_leaf) 26567c478bd9Sstevel@tonic-gate cp = cpstart = hint; 26577c478bd9Sstevel@tonic-gate else 26587c478bd9Sstevel@tonic-gate cp = cpstart = lpl_leaf->lpl_cpus; 26597c478bd9Sstevel@tonic-gate 26607c478bd9Sstevel@tonic-gate do { 26617c478bd9Sstevel@tonic-gate if (cp == curcpu) 26627c478bd9Sstevel@tonic-gate cpupri = -1; 26637c478bd9Sstevel@tonic-gate else if (cp == cpu_inmotion) 26647c478bd9Sstevel@tonic-gate cpupri = SHRT_MAX; 26657c478bd9Sstevel@tonic-gate else 26667c478bd9Sstevel@tonic-gate cpupri = cp->cpu_dispatch_pri; 26677c478bd9Sstevel@tonic-gate if (cp->cpu_disp->disp_maxrunpri > cpupri) 26687c478bd9Sstevel@tonic-gate cpupri = cp->cpu_disp->disp_maxrunpri; 26697c478bd9Sstevel@tonic-gate if (cp->cpu_chosen_level > cpupri) 26707c478bd9Sstevel@tonic-gate cpupri = cp->cpu_chosen_level; 26717c478bd9Sstevel@tonic-gate if (cpupri < bestpri) { 26727c478bd9Sstevel@tonic-gate if (CPU_IDLING(cpupri)) { 26737c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & 26747c478bd9Sstevel@tonic-gate CPU_QUIESCED) == 0); 26757c478bd9Sstevel@tonic-gate return (cp); 26767c478bd9Sstevel@tonic-gate } 26777c478bd9Sstevel@tonic-gate bestcpu = cp; 26787c478bd9Sstevel@tonic-gate bestpri = cpupri; 26797c478bd9Sstevel@tonic-gate } 26807c478bd9Sstevel@tonic-gate } while ((cp = cp->cpu_next_lpl) != cpstart); 26817c478bd9Sstevel@tonic-gate } 26827c478bd9Sstevel@tonic-gate 26837c478bd9Sstevel@tonic-gate if (bestcpu && (tpri > bestpri)) { 26847c478bd9Sstevel@tonic-gate ASSERT((bestcpu->cpu_flags & CPU_QUIESCED) == 0); 26857c478bd9Sstevel@tonic-gate return (bestcpu); 26867c478bd9Sstevel@tonic-gate } 26877c478bd9Sstevel@tonic-gate if (besthomecpu == NULL) 26887c478bd9Sstevel@tonic-gate besthomecpu = bestcpu; 26897c478bd9Sstevel@tonic-gate /* 26907c478bd9Sstevel@tonic-gate * Add the lgrps we just considered to the "done" set 26917c478bd9Sstevel@tonic-gate */ 26927c478bd9Sstevel@tonic-gate klgrpset_or(done, cur_set); 26937c478bd9Sstevel@tonic-gate 26947c478bd9Sstevel@tonic-gate } while ((lpl_iter = lpl_iter->lpl_parent) != NULL); 26957c478bd9Sstevel@tonic-gate 26967c478bd9Sstevel@tonic-gate /* 26977c478bd9Sstevel@tonic-gate * The specified priority isn't high enough to run immediately 26987c478bd9Sstevel@tonic-gate * anywhere, so just return the best CPU from the home lgroup. 
26997c478bd9Sstevel@tonic-gate */ 27007c478bd9Sstevel@tonic-gate ASSERT((besthomecpu->cpu_flags & CPU_QUIESCED) == 0); 27017c478bd9Sstevel@tonic-gate return (besthomecpu); 27027c478bd9Sstevel@tonic-gate } 27037c478bd9Sstevel@tonic-gate 27047c478bd9Sstevel@tonic-gate /* 27057c478bd9Sstevel@tonic-gate * This routine provides the generic idle cpu function for all processors. 27067c478bd9Sstevel@tonic-gate * If a processor has some specific code to execute when idle (say, to stop 27077c478bd9Sstevel@tonic-gate * the pipeline and save power) then that routine should be defined in the 27087c478bd9Sstevel@tonic-gate * processor-specific code (module_xx.c) and the global variable idle_cpu 27097c478bd9Sstevel@tonic-gate * set to that function. 27107c478bd9Sstevel@tonic-gate */ 27117c478bd9Sstevel@tonic-gate static void 27127c478bd9Sstevel@tonic-gate generic_idle_cpu(void) 27137c478bd9Sstevel@tonic-gate { 27147c478bd9Sstevel@tonic-gate } 27157c478bd9Sstevel@tonic-gate 27167c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 27177c478bd9Sstevel@tonic-gate static void 27187c478bd9Sstevel@tonic-gate generic_enq_thread(cpu_t *cpu, int bound) 27197c478bd9Sstevel@tonic-gate { 27207c478bd9Sstevel@tonic-gate } 27217c478bd9Sstevel@tonic-gate 27227c478bd9Sstevel@tonic-gate /* 27237c478bd9Sstevel@tonic-gate * Select a CPU for this thread to run on. Choose t->t_cpu unless: 27247c478bd9Sstevel@tonic-gate * - t->t_cpu is not in this thread's assigned lgrp 27257c478bd9Sstevel@tonic-gate * - the time since the thread last came off t->t_cpu exceeds the 27267c478bd9Sstevel@tonic-gate * rechoose time for this cpu (ignore this if t is curthread in 27277c478bd9Sstevel@tonic-gate * which case it's on CPU and t->t_disp_time is inaccurate) 27287c478bd9Sstevel@tonic-gate * - t->t_cpu is presently the target of an offline or partition move 27297c478bd9Sstevel@tonic-gate * request 27307c478bd9Sstevel@tonic-gate */ 27317c478bd9Sstevel@tonic-gate static cpu_t * 27327c478bd9Sstevel@tonic-gate cpu_choose(kthread_t *t, pri_t tpri) 27337c478bd9Sstevel@tonic-gate { 27347c478bd9Sstevel@tonic-gate ASSERT(tpri < kpqpri); 27357c478bd9Sstevel@tonic-gate 2736fb2f18f8Sesaxe if ((((lbolt - t->t_disp_time) > rechoose_interval) && 27377c478bd9Sstevel@tonic-gate t != curthread) || t->t_cpu == cpu_inmotion) { 27387c478bd9Sstevel@tonic-gate return (disp_lowpri_cpu(t->t_cpu, t->t_lpl, tpri, NULL)); 27397c478bd9Sstevel@tonic-gate } 27407c478bd9Sstevel@tonic-gate 27417c478bd9Sstevel@tonic-gate /* 27427c478bd9Sstevel@tonic-gate * Take a trip through disp_lowpri_cpu() if the thread was 27437c478bd9Sstevel@tonic-gate * running outside its home lgroup 27447c478bd9Sstevel@tonic-gate */ 27457c478bd9Sstevel@tonic-gate if (!klgrpset_ismember(t->t_lpl->lpl_lgrp->lgrp_set[LGRP_RSRC_CPU], 27467c478bd9Sstevel@tonic-gate t->t_cpu->cpu_lpl->lpl_lgrpid)) { 27477c478bd9Sstevel@tonic-gate return (disp_lowpri_cpu(t->t_cpu, t->t_lpl, tpri, 27487c478bd9Sstevel@tonic-gate (t == curthread) ? t->t_cpu : NULL)); 27497c478bd9Sstevel@tonic-gate } 27507c478bd9Sstevel@tonic-gate return (t->t_cpu); 27517c478bd9Sstevel@tonic-gate } 2752
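
/*
 * Illustrative sketch only (not part of the build): a stand-alone analogue
 * of the active-priority bitmap walk performed in disp_fix_unbound_pri()
 * earlier in this file.  It shows how the next lower active priority is
 * recovered from a bitmap with one bit per priority.  The helper names
 * (find_next_lower_pri, highbit64, actmap) are hypothetical; the kernel
 * uses dqactmap[], BT_BIW(), highbit() and bt_gethighbit() for the same
 * effect (with BT_NBIPUL bits per word rather than the 64 assumed here),
 * and additionally re-walks the queue at each level to skip bound threads.
 */
#if 0
static int
highbit64(unsigned long w)
{
	int b = 0;

	while (w != 0) {		/* 1-based index of highest set bit */
		w >>= 1;
		b++;
	}
	return (b);			/* 0 if no bit is set */
}

static int
find_next_lower_pri(const unsigned long *actmap, int pri)
{
	int wx = pri / 64;			/* word holding 'pri' */
	unsigned long word;

	/* mask off 'pri' and everything above it within its own word */
	word = actmap[wx] & ((1UL << (pri % 64)) - 1);

	while (word == 0) {			/* walk down through lower words */
		if (wx == 0)
			return (-1);		/* no lower active priority */
		word = actmap[--wx];
	}
	return (wx * 64 + highbit64(word) - 1);
}
#endif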
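
/*
 * Illustrative sketch only (not part of the build): the essence of the
 * "nosteal" affinity window tested in disp_getbest() earlier in this file,
 * shown in isolation.  A thread that has waited on its previous CPU's run
 * queue for less than nosteal nanoseconds is left in place so it keeps its
 * cache investment; once the wait exceeds the window, or looks negative
 * because of clock drift between CPUs, it becomes stealable.  The helper
 * name and its arguments are hypothetical, and the scalehrtime()
 * conversion of the unscaled timestamps is omitted for clarity.
 */
#if 0
static boolean_t
nosteal_window_expired(hrtime_t now, hrtime_t waitrq, hrtime_t nosteal)
{
	hrtime_t rqtime = now - waitrq;	/* time spent on the run queue */

	if (nosteal == 0)
		return (B_TRUE);	/* delayed stealing disabled: steal now */

	/* negative rqtime means cross-CPU clock drift; don't wait forever */
	return (rqtime > nosteal || rqtime < 0);
}
#endif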