/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"	/* from SVr4.0 1.30 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/var.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/tnf.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/bitset.h>
#include <sys/schedctl.h>
#include <sys/atomic.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

#include <vm/as.h>

#define	BOUND_CPU	0x1
#define	BOUND_PARTITION	0x2
#define	BOUND_INTR	0x4

/* Dispatch queue allocation structure and functions */
struct disp_queue_info {
	disp_t	*dp;
	dispq_t	*olddispq;
	dispq_t	*newdispq;
	ulong_t	*olddqactmap;
	ulong_t	*newdqactmap;
	int	oldnglobpris;
};
static void	disp_dq_alloc(struct disp_queue_info *dptr, int numpris,
    disp_t *dp);
static void	disp_dq_assign(struct disp_queue_info *dptr, int numpris);
static void	disp_dq_free(struct disp_queue_info *dptr);

/* platform-specific routine to call when processor is idle */
static void	generic_idle_cpu();
void		(*idle_cpu)() = generic_idle_cpu;

/* routines invoked when a CPU enters/exits the idle loop */
static void	idle_enter();
static void	idle_exit();

/* platform-specific routine to call when thread is enqueued */
static void	generic_enq_thread(cpu_t *, int);
void		(*disp_enq_thread)(cpu_t *, int) = generic_enq_thread;

pri_t	kpreemptpri;		/* priority where kernel preemption applies */
pri_t	upreemptpri = 0;	/* priority where normal preemption applies */
pri_t	intr_pri;		/* interrupt thread priority base level */

#define	KPQPRI	-1		/* pri where cpu affinity is dropped for kpq */
pri_t	kpqpri = KPQPRI;	/* can be set in /etc/system */
disp_t	cpu0_disp;		/* boot CPU's dispatch queue */
disp_lock_t	swapped_lock;	/* lock swapped threads and swap queue */
int	nswapped;		/* total number of swapped threads */
void	disp_swapped_enq(kthread_t *tp);
static void	disp_swapped_setrun(kthread_t *tp);
static void	cpu_resched(cpu_t *cp, pri_t tpri);

/*
 * If this is set, only interrupt threads will cause kernel preemptions.
 * This is done by changing the value of kpreemptpri.  kpreemptpri
 * will either be the max sysclass pri + 1 or the min interrupt pri.
 */
int	only_intr_kpreempt;

extern void set_idle_cpu(int cpun);
extern void unset_idle_cpu(int cpun);
static void setkpdq(kthread_t *tp, int borf);
#define	SETKP_BACK	0
#define	SETKP_FRONT	1
/*
 * Parameter that determines how recently a thread must have run
 * on the CPU to be considered loosely-bound to that CPU to reduce
 * cold cache effects.  The interval is in clock ticks.
 */
#define	RECHOOSE_INTERVAL	3
int	rechoose_interval = RECHOOSE_INTERVAL;
static cpu_t	*cpu_choose(kthread_t *, pri_t);

/*
 * Parameter that determines how long a thread must sit on a run queue
 * before it can be stolen by another CPU, to reduce migrations.
 * The interval is in nanoseconds.
 *
 * nosteal_nsec should be set by platform code to an appropriate value.
 * Setting it to 0 effectively disables the nosteal 'protection'.
 */
hrtime_t nosteal_nsec = -1;

id_t	defaultcid;	/* system "default" class; see dispadmin(1M) */

disp_lock_t	transition_lock;	/* lock on transitioning threads */
disp_lock_t	stop_lock;		/* lock on stopped threads */

static void	cpu_dispqalloc(int numpris);

/*
 * This gets returned by disp_getwork/disp_getbest if we couldn't steal
 * a thread because it was sitting on its run queue for a very short
 * period of time.
 */
#define	T_DONTSTEAL	(kthread_t *)(-1) /* returned by disp_getwork/getbest */

static kthread_t	*disp_getwork(cpu_t *to);
static kthread_t	*disp_getbest(disp_t *from);
static kthread_t	*disp_ratify(kthread_t *tp, disp_t *kpq);

void	swtch_to(kthread_t *);

/*
 * dispatcher and scheduler initialization
 */

/*
 * disp_setup - Common code to calculate and allocate dispatcher
 *	variables and structures based on the maximum priority.
 */
static void
disp_setup(pri_t maxglobpri, pri_t oldnglobpris)
{
	pri_t	newnglobpris;

	ASSERT(MUTEX_HELD(&cpu_lock));

	newnglobpris = maxglobpri + 1 + LOCK_LEVEL;

	if (newnglobpris > oldnglobpris) {
		/*
		 * Allocate new kp queues for each CPU partition.
		 */
		cpupart_kpqalloc(newnglobpris);

		/*
		 * Allocate new dispatch queues for each CPU.
		 */
		cpu_dispqalloc(newnglobpris);

		/*
		 * compute new interrupt thread base priority
		 */
		intr_pri = maxglobpri;
		if (only_intr_kpreempt) {
			kpreemptpri = intr_pri + 1;
			if (kpqpri == KPQPRI)
				kpqpri = kpreemptpri;
		}
		v.v_nglobpris = newnglobpris;
	}
}

/*
 * dispinit - Called to initialize all loaded classes and the
 *	dispatcher framework.
 */
void
dispinit(void)
{
	id_t	cid;
	pri_t	maxglobpri;
	pri_t	cl_maxglobpri;

	maxglobpri = -1;

	/*
	 * Initialize transition lock, which will always be set.
	 */
	DISP_LOCK_INIT(&transition_lock);
	disp_lock_enter_high(&transition_lock);
	DISP_LOCK_INIT(&stop_lock);

	mutex_enter(&cpu_lock);
	CPU->cpu_disp->disp_maxrunpri = -1;
	CPU->cpu_disp->disp_max_unbound_pri = -1;

	/*
	 * Initialize the default CPU partition.
	 */
	cpupart_initialize_default();
	/*
	 * Call the class specific initialization functions for
	 * all pre-installed schedulers.
	 *
	 * We pass the size of a class specific parameter
	 * buffer to each of the initialization functions
	 * to try to catch problems with backward compatibility
	 * of class modules.
	 *
	 * For example a new class module running on an old system
	 * which didn't provide sufficiently large parameter buffers
	 * would be bad news.  Class initialization modules can check for
	 * this and take action if they detect a problem.
	 */

	for (cid = 0; cid < nclass; cid++) {
		sclass_t *sc;

		sc = &sclass[cid];
		if (SCHED_INSTALLED(sc)) {
			cl_maxglobpri = sc->cl_init(cid, PC_CLPARMSZ,
			    &sc->cl_funcs);
			if (cl_maxglobpri > maxglobpri)
				maxglobpri = cl_maxglobpri;
		}
	}
	kpreemptpri = (pri_t)v.v_maxsyspri + 1;
	if (kpqpri == KPQPRI)
		kpqpri = kpreemptpri;

	ASSERT(maxglobpri >= 0);
	disp_setup(maxglobpri, 0);

	mutex_exit(&cpu_lock);

	/*
	 * Get the default class ID; this may be later modified via
	 * dispadmin(1M).  This will load the class (normally TS) and that will
	 * call disp_add(), which is why we had to drop cpu_lock first.
	 */
	if (getcid(defaultclass, &defaultcid) != 0) {
		cmn_err(CE_PANIC, "Couldn't load default scheduling class '%s'",
		    defaultclass);
	}
}

/*
 * disp_add - Called with class pointer to initialize the dispatcher
 *	for a newly loaded class.
 */
void
disp_add(sclass_t *clp)
{
	pri_t	maxglobpri;
	pri_t	cl_maxglobpri;

	mutex_enter(&cpu_lock);
	/*
	 * Initialize the scheduler class.
	 */
	maxglobpri = (pri_t)(v.v_nglobpris - LOCK_LEVEL - 1);
	cl_maxglobpri = clp->cl_init(clp - sclass, PC_CLPARMSZ, &clp->cl_funcs);
	if (cl_maxglobpri > maxglobpri)
		maxglobpri = cl_maxglobpri;

	/*
	 * Save old queue information.  Since we're initializing a
	 * new scheduling class which has just been loaded, then
	 * the size of the dispq may have changed.  We need to handle
	 * that here.
	 */
	disp_setup(maxglobpri, v.v_nglobpris);

	mutex_exit(&cpu_lock);
}


/*
 * For each CPU, allocate new dispatch queues
 * with the stated number of priorities.
 */
static void
cpu_dispqalloc(int numpris)
{
	cpu_t	*cpup;
	struct disp_queue_info	*disp_mem;
	int i, num;

	ASSERT(MUTEX_HELD(&cpu_lock));

	disp_mem = kmem_zalloc(NCPU *
	    sizeof (struct disp_queue_info), KM_SLEEP);

	/*
	 * This routine must allocate all of the memory before stopping
	 * the cpus because it must not sleep in kmem_alloc while the
	 * CPUs are stopped.  Locks they hold will not be freed until they
	 * are restarted.
	 */
	i = 0;
	cpup = cpu_list;
	do {
		disp_dq_alloc(&disp_mem[i], numpris, cpup->cpu_disp);
		i++;
		cpup = cpup->cpu_next;
	} while (cpup != cpu_list);
	num = i;

	pause_cpus(NULL);
	for (i = 0; i < num; i++)
		disp_dq_assign(&disp_mem[i], numpris);
	start_cpus();

	/*
	 * I must free all of the memory after starting the cpus because
	 * I can not risk sleeping in kmem_free while the cpus are stopped.
	 */
	for (i = 0; i < num; i++)
		disp_dq_free(&disp_mem[i]);

	kmem_free(disp_mem, NCPU * sizeof (struct disp_queue_info));
}

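/*
 * disp_dq_alloc(), disp_dq_assign() and disp_dq_free() are the three phases
 * of resizing a dispatch queue: allocate a new queue array and active-queue
 * bitmap (may sleep), install them on the disp_t (copying over the old
 * contents), and free the old arrays.  The assign step may run while the
 * CPUs are paused (see cpu_dispqalloc() above), so it must not block.
 */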
static void
disp_dq_alloc(struct disp_queue_info *dptr, int numpris, disp_t *dp)
{
	dptr->newdispq = kmem_zalloc(numpris * sizeof (dispq_t), KM_SLEEP);
	dptr->newdqactmap = kmem_zalloc(((numpris / BT_NBIPUL) + 1) *
	    sizeof (long), KM_SLEEP);
	dptr->dp = dp;
}

static void
disp_dq_assign(struct disp_queue_info *dptr, int numpris)
{
	disp_t	*dp;

	dp = dptr->dp;
	dptr->olddispq = dp->disp_q;
	dptr->olddqactmap = dp->disp_qactmap;
	dptr->oldnglobpris = dp->disp_npri;

	ASSERT(dptr->oldnglobpris < numpris);

	if (dptr->olddispq != NULL) {
		/*
		 * Use kcopy because bcopy is platform-specific
		 * and could block while we might have paused the cpus.
		 */
		(void) kcopy(dptr->olddispq, dptr->newdispq,
		    dptr->oldnglobpris * sizeof (dispq_t));
		(void) kcopy(dptr->olddqactmap, dptr->newdqactmap,
		    ((dptr->oldnglobpris / BT_NBIPUL) + 1) *
		    sizeof (long));
	}
	dp->disp_q = dptr->newdispq;
	dp->disp_qactmap = dptr->newdqactmap;
	dp->disp_q_limit = &dptr->newdispq[numpris];
	dp->disp_npri = numpris;
}

static void
disp_dq_free(struct disp_queue_info *dptr)
{
	if (dptr->olddispq != NULL)
		kmem_free(dptr->olddispq,
		    dptr->oldnglobpris * sizeof (dispq_t));
	if (dptr->olddqactmap != NULL)
		kmem_free(dptr->olddqactmap,
		    ((dptr->oldnglobpris / BT_NBIPUL) + 1) * sizeof (long));
}

/*
 * For a newly created CPU, initialize the dispatch queue.
 * This is called before the CPU is known through cpu[] or on any lists.
 */
void
disp_cpu_init(cpu_t *cp)
{
	disp_t	*dp;
	dispq_t	*newdispq;
	ulong_t	*newdqactmap;

	ASSERT(MUTEX_HELD(&cpu_lock));	/* protect dispatcher queue sizes */

	if (cp == cpu0_disp.disp_cpu)
		dp = &cpu0_disp;
	else
		dp = kmem_alloc(sizeof (disp_t), KM_SLEEP);
	bzero(dp, sizeof (disp_t));
	cp->cpu_disp = dp;
	dp->disp_cpu = cp;
	dp->disp_maxrunpri = -1;
	dp->disp_max_unbound_pri = -1;
	DISP_LOCK_INIT(&cp->cpu_thread_lock);
	/*
	 * Allocate memory for the dispatcher queue headers
	 * and the active queue bitmap.
	 */
	newdispq = kmem_zalloc(v.v_nglobpris * sizeof (dispq_t), KM_SLEEP);
	newdqactmap = kmem_zalloc(((v.v_nglobpris / BT_NBIPUL) + 1) *
	    sizeof (long), KM_SLEEP);
	dp->disp_q = newdispq;
	dp->disp_qactmap = newdqactmap;
	dp->disp_q_limit = &newdispq[v.v_nglobpris];
	dp->disp_npri = v.v_nglobpris;
}

void
disp_cpu_fini(cpu_t *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	disp_kp_free(cp->cpu_disp);
	if (cp->cpu_disp != &cpu0_disp)
		kmem_free(cp->cpu_disp, sizeof (disp_t));
}

/*
 * Allocate new, larger kpreempt dispatch queue to replace the old one.
 */
void
disp_kp_alloc(disp_t *dq, pri_t npri)
{
	struct disp_queue_info	mem_info;

	if (npri > dq->disp_npri) {
		/*
		 * Allocate memory for the new array.
		 */
		disp_dq_alloc(&mem_info, npri, dq);

		/*
		 * We need to copy the old structures to the new
		 * and free the old.
		 */
		disp_dq_assign(&mem_info, npri);
		disp_dq_free(&mem_info);
	}
}

/*
 * Free dispatch queue.
 * Used for the kpreempt queues for a removed CPU partition and
 * for the per-CPU queues of deleted CPUs.
 */
void
disp_kp_free(disp_t *dq)
{
	struct disp_queue_info	mem_info;

	mem_info.olddispq = dq->disp_q;
	mem_info.olddqactmap = dq->disp_qactmap;
	mem_info.oldnglobpris = dq->disp_npri;
	disp_dq_free(&mem_info);
}

/*
 * End dispatcher and scheduler initialization.
 */

/*
 * See if there's anything to do other than remain idle.
 * Return non-zero if there is.
 *
 * This function must be called with high spl, or with
 * kernel preemption disabled to prevent the partition's
 * active cpu list from changing while being traversed.
 *
 */
int
disp_anywork(void)
{
	cpu_t	*cp = CPU;
	cpu_t	*ocp;

	if (cp->cpu_disp->disp_nrunnable != 0)
		return (1);

	if (!(cp->cpu_flags & CPU_OFFLINE)) {
		if (CP_MAXRUNPRI(cp->cpu_part) >= 0)
			return (1);

		/*
		 * Work can be taken from another CPU if:
		 *	- There is unbound work on the run queue
		 *	- That work isn't a thread undergoing a
		 *	  context switch on an otherwise empty queue.
		 *	- The CPU isn't running the idle loop.
		 */
		for (ocp = cp->cpu_next_part; ocp != cp;
		    ocp = ocp->cpu_next_part) {
			ASSERT(CPU_ACTIVE(ocp));

			if (ocp->cpu_disp->disp_max_unbound_pri != -1 &&
			    !((ocp->cpu_disp_flags & CPU_DISP_DONTSTEAL) &&
			    ocp->cpu_disp->disp_nrunnable == 1) &&
			    ocp->cpu_dispatch_pri != -1)
				return (1);
		}
	}
	return (0);
}

/*
 * Called when CPU enters the idle loop
 */
static void
idle_enter()
{
	cpu_t		*cp = CPU;

	new_cpu_mstate(CMS_IDLE, gethrtime_unscaled());
	CPU_STATS_ADDQ(cp, sys, idlethread, 1);
	set_idle_cpu(cp->cpu_id);	/* arch-dependent hook */
}

/*
 * Called when CPU exits the idle loop
 */
static void
idle_exit()
{
	cpu_t		*cp = CPU;

	new_cpu_mstate(CMS_SYSTEM, gethrtime_unscaled());
	unset_idle_cpu(cp->cpu_id);	/* arch-dependent hook */
}

/*
 * Idle loop.
 */
void
idle()
{
	struct cpu	*cp = CPU;	/* pointer to this CPU */
	kthread_t	*t;		/* taken thread */

	idle_enter();

	/*
	 * Uniprocessor version of idle loop.
	 * Do this until notified that we're on an actual multiprocessor.
	 */
	while (ncpus == 1) {
		if (cp->cpu_disp->disp_nrunnable == 0) {
			(*idle_cpu)();
			continue;
		}
		idle_exit();
		swtch();

		idle_enter(); /* returned from swtch */
	}

	/*
	 * Multiprocessor idle loop.
	 */
	for (;;) {
		/*
		 * If CPU is completely quiesced by p_online(2), just wait
		 * here with minimal bus traffic until put online.
		 */
		while (cp->cpu_flags & CPU_QUIESCED)
			(*idle_cpu)();

		if (cp->cpu_disp->disp_nrunnable != 0) {
			idle_exit();
			swtch();
		} else {
			if (cp->cpu_flags & CPU_OFFLINE)
				continue;
			if ((t = disp_getwork(cp)) == NULL) {
				if (cp->cpu_chosen_level != -1) {
					disp_t *dp = cp->cpu_disp;
					disp_t *kpq;

					disp_lock_enter(&dp->disp_lock);
					/*
					 * Set kpq under lock to prevent
					 * migration between partitions.
					 */
					kpq = &cp->cpu_part->cp_kp_queue;
					if (kpq->disp_maxrunpri == -1)
						cp->cpu_chosen_level = -1;
					disp_lock_exit(&dp->disp_lock);
				}
				(*idle_cpu)();
				continue;
			}
			/*
			 * If there was a thread but we couldn't steal
			 * it, then keep trying.
			 */
			if (t == T_DONTSTEAL)
				continue;
			idle_exit();
			swtch_to(t);
		}
		idle_enter(); /* returned from swtch/swtch_to */
	}
}


/*
 * Preempt the currently running thread in favor of the highest
 * priority thread.  The class of the current thread controls
 * where it goes on the dispatcher queues.  If panicking, turn
 * preemption off.
 */
void
preempt()
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(curthread);

	if (panicstr)
		return;

	TRACE_0(TR_FAC_DISP, TR_PREEMPT_START, "preempt_start");

	thread_lock(t);

	if (t->t_state != TS_ONPROC || t->t_disp_queue != CPU->cpu_disp) {
		/*
		 * this thread has already been chosen to be run on
		 * another CPU.  Clear kprunrun on this CPU since we're
		 * already headed for swtch().
		 */
		CPU->cpu_kprunrun = 0;
		thread_unlock_nopreempt(t);
		TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end");
	} else {
		if (lwp != NULL)
			lwp->lwp_ru.nivcsw++;
		CPU_STATS_ADDQ(CPU, sys, inv_swtch, 1);
		THREAD_TRANSITION(t);
		CL_PREEMPT(t);
		DTRACE_SCHED(preempt);
		thread_unlock_nopreempt(t);

		TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end");

		swtch();		/* clears CPU->cpu_runrun via disp() */
	}
}

extern kthread_t *thread_unpin();

/*
 * disp() - find the highest priority thread for this processor to run, and
 * set it in TS_ONPROC state so that resume() can be called to run it.
 */
static kthread_t *
disp()
{
	cpu_t		*cpup;
	disp_t		*dp;
	kthread_t	*tp;
	dispq_t		*dq;
	int		maxrunword;
	pri_t		pri;
	disp_t		*kpq;

	TRACE_0(TR_FAC_DISP, TR_DISP_START, "disp_start");

	cpup = CPU;
	/*
	 * Find the highest priority loaded, runnable thread.
	 */
	dp = cpup->cpu_disp;

reschedule:
	/*
	 * If there is more important work on the global queue with a better
	 * priority than the maximum on this CPU, take it now.
	 */
	kpq = &cpup->cpu_part->cp_kp_queue;
	while ((pri = kpq->disp_maxrunpri) >= 0 &&
	    pri >= dp->disp_maxrunpri &&
	    (cpup->cpu_flags & CPU_OFFLINE) == 0 &&
	    (tp = disp_getbest(kpq)) != NULL) {
		if (disp_ratify(tp, kpq) != NULL) {
			TRACE_1(TR_FAC_DISP, TR_DISP_END,
			    "disp_end:tid %p", tp);
			return (tp);
		}
	}

	disp_lock_enter(&dp->disp_lock);
	pri = dp->disp_maxrunpri;

	/*
	 * If there is nothing to run, look at what's runnable on other queues.
	 * Choose the idle thread if the CPU is quiesced.
	 * Note that CPUs that have the CPU_OFFLINE flag set can still run
	 * interrupt threads, which will be the only threads on the CPU's own
	 * queue, but cannot run threads from other queues.
	 */
	if (pri == -1) {
		if (!(cpup->cpu_flags & CPU_OFFLINE)) {
			disp_lock_exit(&dp->disp_lock);
			if ((tp = disp_getwork(cpup)) == NULL ||
			    tp == T_DONTSTEAL) {
				tp = cpup->cpu_idle_thread;
				(void) splhigh();
				THREAD_ONPROC(tp, cpup);
				cpup->cpu_dispthread = tp;
				cpup->cpu_dispatch_pri = -1;
				cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
				cpup->cpu_chosen_level = -1;
			}
		} else {
			disp_lock_exit_high(&dp->disp_lock);
			tp = cpup->cpu_idle_thread;
			THREAD_ONPROC(tp, cpup);
			cpup->cpu_dispthread = tp;
			cpup->cpu_dispatch_pri = -1;
			cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
			cpup->cpu_chosen_level = -1;
		}
		TRACE_1(TR_FAC_DISP, TR_DISP_END,
		    "disp_end:tid %p", tp);
		return (tp);
	}

	dq = &dp->disp_q[pri];
	tp = dq->dq_first;

	ASSERT(tp != NULL);
	ASSERT(tp->t_schedflag & TS_LOAD);	/* thread must be swapped in */

	DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp);

	/*
	 * Found it so remove it from queue.
	 */
	dp->disp_nrunnable--;
	dq->dq_sruncnt--;
	if ((dq->dq_first = tp->t_link) == NULL) {
		ulong_t	*dqactmap = dp->disp_qactmap;

		ASSERT(dq->dq_sruncnt == 0);
		dq->dq_last = NULL;

		/*
		 * The queue is empty, so the corresponding bit needs to be
		 * turned off in dqactmap.  If nrunnable != 0, we just took
		 * the last runnable thread off the highest queue, so
		 * recompute disp_maxrunpri.
		 */
		maxrunword = pri >> BT_ULSHIFT;
		dqactmap[maxrunword] &= ~BT_BIW(pri);

		if (dp->disp_nrunnable == 0) {
			dp->disp_max_unbound_pri = -1;
			dp->disp_maxrunpri = -1;
		} else {
			int ipri;

			ipri = bt_gethighbit(dqactmap, maxrunword);
			dp->disp_maxrunpri = ipri;
			if (ipri < dp->disp_max_unbound_pri)
				dp->disp_max_unbound_pri = ipri;
		}
	} else {
		tp->t_link = NULL;
	}

	/*
	 * Set TS_DONT_SWAP flag to prevent another processor from swapping
	 * out this thread before we have a chance to run it.
	 * While running, it is protected against swapping by t_lock.
	 */
	tp->t_schedflag |= TS_DONT_SWAP;
	cpup->cpu_dispthread = tp;		/* protected by spl only */
	cpup->cpu_dispatch_pri = pri;
	ASSERT(pri == DISP_PRIO(tp));
	thread_onproc(tp, cpup);		/* set t_state to TS_ONPROC */
	disp_lock_exit_high(&dp->disp_lock);	/* drop run queue lock */

	ASSERT(tp != NULL);
	TRACE_1(TR_FAC_DISP, TR_DISP_END,
	    "disp_end:tid %p", tp);

	if (disp_ratify(tp, kpq) == NULL)
		goto reschedule;

	return (tp);
}

/*
 * swtch()
 *	Find best runnable thread and run it.
 *	Called with the current thread already switched to a new state,
 *	on a sleep queue, run queue, stopped, and not zombied.
 *	May be called at any spl level less than or equal to LOCK_LEVEL.
 *	Always drops spl to the base level (spl0()).
 */
void
swtch()
{
	kthread_t	*t = curthread;
	kthread_t	*next;
	cpu_t		*cp;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	if (t->t_flag & T_INTR_THREAD)
		cpu_intr_swtch_enter(t);

	if (t->t_intr != NULL) {
		/*
		 * We are an interrupt thread.  Setup and return
		 * the interrupted thread to be resumed.
		 */
		(void) splhigh();	/* block other scheduler action */
		cp = CPU;		/* now protected against migration */
		ASSERT(CPU_ON_INTR(cp) == 0);	/* not called with PIL > 10 */
		CPU_STATS_ADDQ(cp, sys, pswitch, 1);
		CPU_STATS_ADDQ(cp, sys, intrblk, 1);
		next = thread_unpin();
		TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");
		resume_from_intr(next);
	} else {
#ifdef	DEBUG
		if (t->t_state == TS_ONPROC &&
		    t->t_disp_queue->disp_cpu == CPU &&
		    t->t_preempt == 0) {
			thread_lock(t);
			ASSERT(t->t_state != TS_ONPROC ||
			    t->t_disp_queue->disp_cpu != CPU ||
			    t->t_preempt != 0);	/* cannot migrate */
			thread_unlock_nopreempt(t);
		}
#endif	/* DEBUG */
		cp = CPU;
		next = disp();		/* returns with spl high */
		ASSERT(CPU_ON_INTR(cp) == 0);	/* not called with PIL > 10 */

		/* OK to steal anything left on run queue */
		cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL;

		if (next != t) {
			if (t == cp->cpu_idle_thread) {
				PG_NRUN_UPDATE(cp, 1);
			} else if (next == cp->cpu_idle_thread) {
				PG_NRUN_UPDATE(cp, -1);
			}

			/*
			 * If t was previously in the TS_ONPROC state,
			 * setfrontdq and setbackdq won't have set its t_waitrq.
			 * Since we now finally know that we're switching away
			 * from this thread, set its t_waitrq if it is on a run
			 * queue.
			 */
			if ((t->t_state == TS_RUN) && (t->t_waitrq == 0)) {
				t->t_waitrq = gethrtime_unscaled();
			}

			/*
			 * restore mstate of thread that we are switching to
			 */
			restore_mstate(next);

			CPU_STATS_ADDQ(cp, sys, pswitch, 1);
			cp->cpu_last_swtch = t->t_disp_time = lbolt;
			TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

			if (dtrace_vtime_active)
				dtrace_vtime_switch(next);

			resume(next);
			/*
			 * The TR_RESUME_END and TR_SWTCH_END trace points
			 * appear at the end of resume(), because we may not
			 * return here
			 */
		} else {
			if (t->t_flag & T_INTR_THREAD)
				cpu_intr_swtch_exit(t);

			DTRACE_SCHED(remain__cpu);
			TRACE_0(TR_FAC_DISP, TR_SWTCH_END, "swtch_end");
			(void) spl0();
		}
	}
}

/*
 * swtch_from_zombie()
 *	Special case of swtch(), which allows checks for TS_ZOMB to be
 *	eliminated from normal resume.
 *	Find best runnable thread and run it.
 *	Called with the current thread zombied.
 *	Zombies cannot migrate, so CPU references are safe.
 */
void
swtch_from_zombie()
{
	kthread_t	*next;
	cpu_t		*cpu = CPU;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	ASSERT(curthread->t_state == TS_ZOMB);

	next = disp();			/* returns with spl high */
	ASSERT(CPU_ON_INTR(CPU) == 0);	/* not called with PIL > 10 */
	CPU_STATS_ADDQ(CPU, sys, pswitch, 1);
	ASSERT(next != curthread);
	TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

	if (next == cpu->cpu_idle_thread)
		PG_NRUN_UPDATE(cpu, -1);

	restore_mstate(next);

	if (dtrace_vtime_active)
		dtrace_vtime_switch(next);

	resume_from_zombie(next);
	/*
	 * The TR_RESUME_END and TR_SWTCH_END trace points
	 * appear at the end of resume(), because we certainly will not
	 * return here
	 */
}

#if defined(DEBUG) && (defined(DISP_DEBUG) || defined(lint))
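/*
 * Debug-only helper used in ASSERTs: search the dispatch queues of every
 * online CPU, and finally this CPU's partition kpreempt queue, for the
 * thread tp.  Returns 1 if tp is found on a queue, 0 otherwise.
 */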
static int
thread_on_queue(kthread_t *tp)
{
	cpu_t	*cp;
	cpu_t	*self;
	disp_t	*dp;

	self = CPU;
	cp = self->cpu_next_onln;
	dp = cp->cpu_disp;
	for (;;) {
		dispq_t		*dq;
		dispq_t		*eq;

		disp_lock_enter_high(&dp->disp_lock);
		for (dq = dp->disp_q, eq = dp->disp_q_limit; dq < eq; ++dq) {
			kthread_t	*rp;

			ASSERT(dq->dq_last == NULL ||
			    dq->dq_last->t_link == NULL);
			for (rp = dq->dq_first; rp; rp = rp->t_link)
				if (tp == rp) {
					disp_lock_exit_high(&dp->disp_lock);
					return (1);
				}
		}
		disp_lock_exit_high(&dp->disp_lock);
		if (cp == NULL)
			break;
		if (cp == self) {
			dp = &cp->cpu_part->cp_kp_queue;
			cp = NULL;
		} else {
			cp = cp->cpu_next_onln;
			dp = cp->cpu_disp;
		}
	}
	return (0);
}	/* end of thread_on_queue */
#else

#define	thread_on_queue(tp)	0	/* ASSERT must be !thread_on_queue */

#endif	/* DEBUG */

/*
 * like swtch(), but switch to a specified thread taken from another CPU.
 *	called with spl high..
 */
void
swtch_to(kthread_t *next)
{
	cpu_t	*cp = CPU;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	/*
	 * Update context switch statistics.
	 */
	CPU_STATS_ADDQ(cp, sys, pswitch, 1);

	TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

	if (curthread == cp->cpu_idle_thread)
		PG_NRUN_UPDATE(cp, 1);

	/* OK to steal anything left on run queue */
	cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL;

	/* record last execution time */
	cp->cpu_last_swtch = curthread->t_disp_time = lbolt;

	/*
	 * If t was previously in the TS_ONPROC state, setfrontdq and setbackdq
	 * won't have set its t_waitrq.  Since we now finally know that we're
	 * switching away from this thread, set its t_waitrq if it is on a run
	 * queue.
	 */
	if ((curthread->t_state == TS_RUN) && (curthread->t_waitrq == 0)) {
		curthread->t_waitrq = gethrtime_unscaled();
	}

	/* restore next thread to previously running microstate */
	restore_mstate(next);

	if (dtrace_vtime_active)
		dtrace_vtime_switch(next);

	resume(next);
	/*
	 * The TR_RESUME_END and TR_SWTCH_END trace points
	 * appear at the end of resume(), because we may not
	 * return here
	 */
}


#define	CPU_IDLING(pri)	((pri) == -1)

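/*
 * Nudge a CPU into rescheduling: if cp is running something of lower
 * priority than tpri, flag a user-level preemption (cpu_runrun) when tpri
 * is at least upreemptpri, and a kernel preemption (cpu_kprunrun) when it
 * is at least kpreemptpri, then poke the CPU if it is not the current one
 * so that it notices the flags.
 */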
static void
cpu_resched(cpu_t *cp, pri_t tpri)
{
	int	call_poke_cpu = 0;
	pri_t	cpupri = cp->cpu_dispatch_pri;

	if (!CPU_IDLING(cpupri) && (cpupri < tpri)) {
		TRACE_2(TR_FAC_DISP, TR_CPU_RESCHED,
		    "CPU_RESCHED:Tpri %d Cpupri %d", tpri, cpupri);
		if (tpri >= upreemptpri && cp->cpu_runrun == 0) {
			cp->cpu_runrun = 1;
			aston(cp->cpu_dispthread);
			if (tpri < kpreemptpri && cp != CPU)
				call_poke_cpu = 1;
		}
		if (tpri >= kpreemptpri && cp->cpu_kprunrun == 0) {
			cp->cpu_kprunrun = 1;
			if (cp != CPU)
				call_poke_cpu = 1;
		}
	}

	/*
	 * Propagate cpu_runrun, and cpu_kprunrun to global visibility.
	 */
	membar_enter();

	if (call_poke_cpu)
		poke_cpu(cp->cpu_id);
}

/*
 * Perform multi-level CMT load balancing of running threads.
 * tp is the thread being enqueued
 * cp is the hint CPU (chosen by cpu_choose()).
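 *
 * At each level of cp's CMT lineage, look across the PG's siblings for a
 * candidate PG that has CPUs in tp's partition and is running fewer
 * threads (or the same number of threads on more active CPUs).  If such a
 * sibling is found, hand back an idle CPU from it that belongs to tp's
 * partition; if no better sibling (or no idle CPU) is found, the hint CPU
 * cp is returned unchanged.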
10927c478bd9Sstevel@tonic-gate */ 10937c478bd9Sstevel@tonic-gate static cpu_t * 1094fb2f18f8Sesaxe cmt_balance(kthread_t *tp, cpu_t *cp) 10957c478bd9Sstevel@tonic-gate { 1096*d129bde2Sesaxe int hint, i, cpu, nsiblings; 1097fb2f18f8Sesaxe int self = 0; 1098fb2f18f8Sesaxe group_t *cmt_pgs, *siblings; 1099fb2f18f8Sesaxe pg_cmt_t *pg, *pg_tmp, *tpg = NULL; 1100fb2f18f8Sesaxe int pg_nrun, tpg_nrun; 1101fb2f18f8Sesaxe int level = 0; 1102fb2f18f8Sesaxe cpu_t *newcp; 11037c478bd9Sstevel@tonic-gate 1104fb2f18f8Sesaxe ASSERT(THREAD_LOCK_HELD(tp)); 11057c478bd9Sstevel@tonic-gate 1106fb2f18f8Sesaxe cmt_pgs = &cp->cpu_pg->cmt_pgs; 1107fb2f18f8Sesaxe 1108fb2f18f8Sesaxe if (GROUP_SIZE(cmt_pgs) == 0) 1109fb2f18f8Sesaxe return (cp); /* nothing to do */ 1110fb2f18f8Sesaxe 1111fb2f18f8Sesaxe if (tp == curthread) 1112fb2f18f8Sesaxe self = 1; 11137c478bd9Sstevel@tonic-gate 11147c478bd9Sstevel@tonic-gate /* 1115fb2f18f8Sesaxe * Balance across siblings in the CPUs CMT lineage 11167c478bd9Sstevel@tonic-gate */ 11177c478bd9Sstevel@tonic-gate do { 1118fb2f18f8Sesaxe pg = GROUP_ACCESS(cmt_pgs, level); 11197c478bd9Sstevel@tonic-gate 1120*d129bde2Sesaxe siblings = pg->cmt_siblings; 1121*d129bde2Sesaxe nsiblings = GROUP_SIZE(siblings); /* self inclusive */ 1122*d129bde2Sesaxe if (nsiblings == 1) 1123*d129bde2Sesaxe continue; /* nobody to balance against */ 1124*d129bde2Sesaxe 1125fb2f18f8Sesaxe pg_nrun = pg->cmt_nrunning; 1126fb2f18f8Sesaxe if (self && 1127fb2f18f8Sesaxe bitset_in_set(&pg->cmt_cpus_actv_set, CPU->cpu_seqid)) 1128fb2f18f8Sesaxe pg_nrun--; /* Ignore curthread's effect */ 1129fb2f18f8Sesaxe 1130fb2f18f8Sesaxe hint = pg->cmt_hint; 11317c478bd9Sstevel@tonic-gate /* 1132fb2f18f8Sesaxe * Check for validity of the hint 1133fb2f18f8Sesaxe * It should reference a valid sibling 11347c478bd9Sstevel@tonic-gate */ 1135*d129bde2Sesaxe if (hint >= nsiblings) 1136fb2f18f8Sesaxe hint = pg->cmt_hint = 0; 1137fb2f18f8Sesaxe else 1138fb2f18f8Sesaxe pg->cmt_hint++; 11397c478bd9Sstevel@tonic-gate 11407c478bd9Sstevel@tonic-gate /* 1141fb2f18f8Sesaxe * Find a balancing candidate from among our siblings 1142fb2f18f8Sesaxe * "hint" is a hint for where to start looking 11437c478bd9Sstevel@tonic-gate */ 1144fb2f18f8Sesaxe i = hint; 1145fb2f18f8Sesaxe do { 1146*d129bde2Sesaxe ASSERT(i < nsiblings); 1147fb2f18f8Sesaxe pg_tmp = GROUP_ACCESS(siblings, i); 1148fb2f18f8Sesaxe 1149fb2f18f8Sesaxe /* 1150fb2f18f8Sesaxe * The candidate must not be us, and must 1151fb2f18f8Sesaxe * have some CPU resources in the thread's 1152fb2f18f8Sesaxe * partition 1153fb2f18f8Sesaxe */ 1154fb2f18f8Sesaxe if (pg_tmp != pg && 1155fb2f18f8Sesaxe bitset_in_set(&tp->t_cpupart->cp_cmt_pgs, 1156fb2f18f8Sesaxe ((pg_t *)pg_tmp)->pg_id)) { 1157fb2f18f8Sesaxe tpg = pg_tmp; 11587c478bd9Sstevel@tonic-gate break; 1159fb2f18f8Sesaxe } 11607c478bd9Sstevel@tonic-gate 1161*d129bde2Sesaxe if (++i >= nsiblings) 1162fb2f18f8Sesaxe i = 0; 1163fb2f18f8Sesaxe } while (i != hint); 1164fb2f18f8Sesaxe 1165fb2f18f8Sesaxe if (!tpg) 1166fb2f18f8Sesaxe continue; /* no candidates at this level */ 1167fb2f18f8Sesaxe 1168fb2f18f8Sesaxe /* 1169fb2f18f8Sesaxe * Check if the balancing target is underloaded 1170fb2f18f8Sesaxe * Decide to balance if the target is running fewer 1171fb2f18f8Sesaxe * threads, or if it's running the same number of threads 1172fb2f18f8Sesaxe * with more online CPUs 1173fb2f18f8Sesaxe */ 1174fb2f18f8Sesaxe tpg_nrun = tpg->cmt_nrunning; 1175fb2f18f8Sesaxe if (pg_nrun > tpg_nrun || 1176fb2f18f8Sesaxe (pg_nrun == tpg_nrun && 1177fb2f18f8Sesaxe 
(GROUP_SIZE(&tpg->cmt_cpus_actv) > 1178fb2f18f8Sesaxe GROUP_SIZE(&pg->cmt_cpus_actv)))) { 1179fb2f18f8Sesaxe break; 1180fb2f18f8Sesaxe } 1181fb2f18f8Sesaxe tpg = NULL; 1182fb2f18f8Sesaxe } while (++level < GROUP_SIZE(cmt_pgs)); 1183fb2f18f8Sesaxe 1184fb2f18f8Sesaxe 1185fb2f18f8Sesaxe if (tpg) { 1186fb2f18f8Sesaxe /* 1187fb2f18f8Sesaxe * Select an idle CPU from the target PG 1188fb2f18f8Sesaxe */ 1189fb2f18f8Sesaxe for (cpu = 0; cpu < GROUP_SIZE(&tpg->cmt_cpus_actv); cpu++) { 1190fb2f18f8Sesaxe newcp = GROUP_ACCESS(&tpg->cmt_cpus_actv, cpu); 1191fb2f18f8Sesaxe if (newcp->cpu_part == tp->t_cpupart && 1192fb2f18f8Sesaxe newcp->cpu_dispatch_pri == -1) { 1193fb2f18f8Sesaxe cp = newcp; 1194fb2f18f8Sesaxe break; 1195fb2f18f8Sesaxe } 1196fb2f18f8Sesaxe } 1197fb2f18f8Sesaxe } 1198fb2f18f8Sesaxe 11997c478bd9Sstevel@tonic-gate return (cp); 12007c478bd9Sstevel@tonic-gate } 12017c478bd9Sstevel@tonic-gate 12027c478bd9Sstevel@tonic-gate /* 12037c478bd9Sstevel@tonic-gate * setbackdq() keeps runqs balanced such that the difference in length 12047c478bd9Sstevel@tonic-gate * between the chosen runq and the next one is no more than RUNQ_MAX_DIFF. 12057c478bd9Sstevel@tonic-gate * For threads with priorities below RUNQ_MATCH_PRI levels, the runq's lengths 12067c478bd9Sstevel@tonic-gate * must match. When per-thread TS_RUNQMATCH flag is set, setbackdq() will 12077c478bd9Sstevel@tonic-gate * try to keep runqs perfectly balanced regardless of the thread priority. 12087c478bd9Sstevel@tonic-gate */ 12097c478bd9Sstevel@tonic-gate #define RUNQ_MATCH_PRI 16 /* pri below which queue lengths must match */ 12107c478bd9Sstevel@tonic-gate #define RUNQ_MAX_DIFF 2 /* maximum runq length difference */ 12117c478bd9Sstevel@tonic-gate #define RUNQ_LEN(cp, pri) ((cp)->cpu_disp->disp_q[pri].dq_sruncnt) 12127c478bd9Sstevel@tonic-gate 12137c478bd9Sstevel@tonic-gate /* 12147c478bd9Sstevel@tonic-gate * Put the specified thread on the back of the dispatcher 12157c478bd9Sstevel@tonic-gate * queue corresponding to its current priority. 12167c478bd9Sstevel@tonic-gate * 12177c478bd9Sstevel@tonic-gate * Called with the thread in transition, onproc or stopped state 12187c478bd9Sstevel@tonic-gate * and locked (transition implies locked) and at high spl. 12197c478bd9Sstevel@tonic-gate * Returns with the thread in TS_RUN state and still locked. 12207c478bd9Sstevel@tonic-gate */ 12217c478bd9Sstevel@tonic-gate void 12227c478bd9Sstevel@tonic-gate setbackdq(kthread_t *tp) 12237c478bd9Sstevel@tonic-gate { 12247c478bd9Sstevel@tonic-gate dispq_t *dq; 12257c478bd9Sstevel@tonic-gate disp_t *dp; 12267c478bd9Sstevel@tonic-gate cpu_t *cp; 12277c478bd9Sstevel@tonic-gate pri_t tpri; 12287c478bd9Sstevel@tonic-gate int bound; 12297c478bd9Sstevel@tonic-gate 12307c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 12317c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & TS_ALLSTART) == 0); 12327c478bd9Sstevel@tonic-gate ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */ 12337c478bd9Sstevel@tonic-gate 12347c478bd9Sstevel@tonic-gate /* 12357c478bd9Sstevel@tonic-gate * If thread is "swapped" or on the swap queue don't 12367c478bd9Sstevel@tonic-gate * queue it, but wake sched. 
12377c478bd9Sstevel@tonic-gate */ 12387c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) { 12397c478bd9Sstevel@tonic-gate disp_swapped_setrun(tp); 12407c478bd9Sstevel@tonic-gate return; 12417c478bd9Sstevel@tonic-gate } 12427c478bd9Sstevel@tonic-gate 12437c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 12447c478bd9Sstevel@tonic-gate if (ncpus == 1) 12457c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 12467c478bd9Sstevel@tonic-gate else if (!tp->t_bound_cpu && !tp->t_weakbound_cpu) { 12477c478bd9Sstevel@tonic-gate if (tpri >= kpqpri) { 12487c478bd9Sstevel@tonic-gate setkpdq(tp, SETKP_BACK); 12497c478bd9Sstevel@tonic-gate return; 12507c478bd9Sstevel@tonic-gate } 12517c478bd9Sstevel@tonic-gate /* 12527c478bd9Sstevel@tonic-gate * Let cpu_choose suggest a CPU. 12537c478bd9Sstevel@tonic-gate */ 12547c478bd9Sstevel@tonic-gate cp = cpu_choose(tp, tpri); 12557c478bd9Sstevel@tonic-gate 12567c478bd9Sstevel@tonic-gate if (tp->t_cpupart == cp->cpu_part) { 12577c478bd9Sstevel@tonic-gate int qlen; 12587c478bd9Sstevel@tonic-gate 12597c478bd9Sstevel@tonic-gate /* 1260fb2f18f8Sesaxe * Perform any CMT load balancing 12617c478bd9Sstevel@tonic-gate */ 1262fb2f18f8Sesaxe cp = cmt_balance(tp, cp); 12637c478bd9Sstevel@tonic-gate 12647c478bd9Sstevel@tonic-gate /* 12657c478bd9Sstevel@tonic-gate * Balance across the run queues 12667c478bd9Sstevel@tonic-gate */ 12677c478bd9Sstevel@tonic-gate qlen = RUNQ_LEN(cp, tpri); 12687c478bd9Sstevel@tonic-gate if (tpri >= RUNQ_MATCH_PRI && 12697c478bd9Sstevel@tonic-gate !(tp->t_schedflag & TS_RUNQMATCH)) 12707c478bd9Sstevel@tonic-gate qlen -= RUNQ_MAX_DIFF; 12717c478bd9Sstevel@tonic-gate if (qlen > 0) { 1272685679f7Sakolb cpu_t *newcp; 12737c478bd9Sstevel@tonic-gate 1274685679f7Sakolb if (tp->t_lpl->lpl_lgrpid == LGRP_ROOTID) { 1275685679f7Sakolb newcp = cp->cpu_next_part; 1276685679f7Sakolb } else if ((newcp = cp->cpu_next_lpl) == cp) { 1277685679f7Sakolb newcp = cp->cpu_next_part; 12787c478bd9Sstevel@tonic-gate } 1279685679f7Sakolb 1280685679f7Sakolb if (RUNQ_LEN(newcp, tpri) < qlen) { 1281685679f7Sakolb DTRACE_PROBE3(runq__balance, 1282685679f7Sakolb kthread_t *, tp, 1283685679f7Sakolb cpu_t *, cp, cpu_t *, newcp); 1284685679f7Sakolb cp = newcp; 1285685679f7Sakolb } 12867c478bd9Sstevel@tonic-gate } 12877c478bd9Sstevel@tonic-gate } else { 12887c478bd9Sstevel@tonic-gate /* 12897c478bd9Sstevel@tonic-gate * Migrate to a cpu in the new partition. 12907c478bd9Sstevel@tonic-gate */ 12917c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist, 12927c478bd9Sstevel@tonic-gate tp->t_lpl, tp->t_pri, NULL); 12937c478bd9Sstevel@tonic-gate } 12947c478bd9Sstevel@tonic-gate bound = 0; 12957c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 12967c478bd9Sstevel@tonic-gate } else { 12977c478bd9Sstevel@tonic-gate /* 12987c478bd9Sstevel@tonic-gate * It is possible that t_weakbound_cpu != t_bound_cpu (for 12997c478bd9Sstevel@tonic-gate * a short time until weak binding that existed when the 13007c478bd9Sstevel@tonic-gate * strong binding was established has dropped) so we must 13017c478bd9Sstevel@tonic-gate * favour weak binding over strong. 13027c478bd9Sstevel@tonic-gate */ 13037c478bd9Sstevel@tonic-gate cp = tp->t_weakbound_cpu ? 
13047c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu : tp->t_bound_cpu; 13057c478bd9Sstevel@tonic-gate bound = 1; 13067c478bd9Sstevel@tonic-gate } 1307f2bd4627Sjohansen /* 1308f2bd4627Sjohansen * A thread that is ONPROC may be temporarily placed on the run queue 1309f2bd4627Sjohansen * but then chosen to run again by disp. If the thread we're placing on 1310f2bd4627Sjohansen * the queue is in TS_ONPROC state, don't set its t_waitrq until a 1311f2bd4627Sjohansen * replacement process is actually scheduled in swtch(). In this 1312f2bd4627Sjohansen * situation, curthread is the only thread that could be in the ONPROC 1313f2bd4627Sjohansen * state. 1314f2bd4627Sjohansen */ 1315f2bd4627Sjohansen if ((tp != curthread) && (tp->t_waitrq == 0)) { 1316f2bd4627Sjohansen hrtime_t curtime; 1317f2bd4627Sjohansen 1318f2bd4627Sjohansen curtime = gethrtime_unscaled(); 1319f2bd4627Sjohansen (void) cpu_update_pct(tp, curtime); 1320f2bd4627Sjohansen tp->t_waitrq = curtime; 1321f2bd4627Sjohansen } else { 1322f2bd4627Sjohansen (void) cpu_update_pct(tp, gethrtime_unscaled()); 1323f2bd4627Sjohansen } 1324f2bd4627Sjohansen 13257c478bd9Sstevel@tonic-gate dp = cp->cpu_disp; 13267c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock); 13277c478bd9Sstevel@tonic-gate 13287c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 0); 13297c478bd9Sstevel@tonic-gate TRACE_3(TR_FAC_DISP, TR_BACKQ, "setbackdq:pri %d cpu %p tid %p", 13307c478bd9Sstevel@tonic-gate tpri, cp, tp); 13317c478bd9Sstevel@tonic-gate 13327c478bd9Sstevel@tonic-gate #ifndef NPROBE 13337c478bd9Sstevel@tonic-gate /* Kernel probe */ 13347c478bd9Sstevel@tonic-gate if (tnf_tracing_active) 13357c478bd9Sstevel@tonic-gate tnf_thread_queue(tp, cp, tpri); 13367c478bd9Sstevel@tonic-gate #endif /* NPROBE */ 13377c478bd9Sstevel@tonic-gate 13387c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 13397c478bd9Sstevel@tonic-gate 13407c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */ 13417c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp; 13427c478bd9Sstevel@tonic-gate tp->t_link = NULL; 13437c478bd9Sstevel@tonic-gate 13447c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 13457c478bd9Sstevel@tonic-gate dp->disp_nrunnable++; 1346685679f7Sakolb if (!bound) 1347685679f7Sakolb dp->disp_steal = 0; 13487c478bd9Sstevel@tonic-gate membar_enter(); 13497c478bd9Sstevel@tonic-gate 13507c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) { 13517c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first != NULL); 13527c478bd9Sstevel@tonic-gate dq->dq_last->t_link = tp; 13537c478bd9Sstevel@tonic-gate dq->dq_last = tp; 13547c478bd9Sstevel@tonic-gate } else { 13557c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 13567c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 13577c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 13587c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri); 13597c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) { 13607c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri; 13617c478bd9Sstevel@tonic-gate membar_enter(); 13627c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri); 13637c478bd9Sstevel@tonic-gate } 13647c478bd9Sstevel@tonic-gate } 13657c478bd9Sstevel@tonic-gate 13667c478bd9Sstevel@tonic-gate if (!bound && tpri > dp->disp_max_unbound_pri) { 13677c478bd9Sstevel@tonic-gate if (tp == curthread && dp->disp_max_unbound_pri == -1 && 13687c478bd9Sstevel@tonic-gate cp == CPU) { 13697c478bd9Sstevel@tonic-gate /* 13707c478bd9Sstevel@tonic-gate * If there are no other 
unbound threads on the 13717c478bd9Sstevel@tonic-gate * run queue, don't allow other CPUs to steal 13727c478bd9Sstevel@tonic-gate * this thread while we are in the middle of a 13737c478bd9Sstevel@tonic-gate * context switch. We may just switch to it 13747c478bd9Sstevel@tonic-gate * again right away. CPU_DISP_DONTSTEAL is cleared 13757c478bd9Sstevel@tonic-gate * in swtch and swtch_to. 13767c478bd9Sstevel@tonic-gate */ 13777c478bd9Sstevel@tonic-gate cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL; 13787c478bd9Sstevel@tonic-gate } 13797c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 13807c478bd9Sstevel@tonic-gate } 13817c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, bound); 13827c478bd9Sstevel@tonic-gate } 13837c478bd9Sstevel@tonic-gate 13847c478bd9Sstevel@tonic-gate /* 13857c478bd9Sstevel@tonic-gate * Put the specified thread on the front of the dispatcher 13867c478bd9Sstevel@tonic-gate * queue corresponding to its current priority. 13877c478bd9Sstevel@tonic-gate * 13887c478bd9Sstevel@tonic-gate * Called with the thread in transition, onproc or stopped state 13897c478bd9Sstevel@tonic-gate * and locked (transition implies locked) and at high spl. 13907c478bd9Sstevel@tonic-gate * Returns with the thread in TS_RUN state and still locked. 13917c478bd9Sstevel@tonic-gate */ 13927c478bd9Sstevel@tonic-gate void 13937c478bd9Sstevel@tonic-gate setfrontdq(kthread_t *tp) 13947c478bd9Sstevel@tonic-gate { 13957c478bd9Sstevel@tonic-gate disp_t *dp; 13967c478bd9Sstevel@tonic-gate dispq_t *dq; 13977c478bd9Sstevel@tonic-gate cpu_t *cp; 13987c478bd9Sstevel@tonic-gate pri_t tpri; 13997c478bd9Sstevel@tonic-gate int bound; 14007c478bd9Sstevel@tonic-gate 14017c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 14027c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & TS_ALLSTART) == 0); 14037c478bd9Sstevel@tonic-gate ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */ 14047c478bd9Sstevel@tonic-gate 14057c478bd9Sstevel@tonic-gate /* 14067c478bd9Sstevel@tonic-gate * If thread is "swapped" or on the swap queue don't 14077c478bd9Sstevel@tonic-gate * queue it, but wake sched. 14087c478bd9Sstevel@tonic-gate */ 14097c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) { 14107c478bd9Sstevel@tonic-gate disp_swapped_setrun(tp); 14117c478bd9Sstevel@tonic-gate return; 14127c478bd9Sstevel@tonic-gate } 14137c478bd9Sstevel@tonic-gate 14147c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 14157c478bd9Sstevel@tonic-gate if (ncpus == 1) 14167c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 14177c478bd9Sstevel@tonic-gate else if (!tp->t_bound_cpu && !tp->t_weakbound_cpu) { 14187c478bd9Sstevel@tonic-gate if (tpri >= kpqpri) { 14197c478bd9Sstevel@tonic-gate setkpdq(tp, SETKP_FRONT); 14207c478bd9Sstevel@tonic-gate return; 14217c478bd9Sstevel@tonic-gate } 14227c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 14237c478bd9Sstevel@tonic-gate if (tp->t_cpupart == cp->cpu_part) { 14247c478bd9Sstevel@tonic-gate /* 14257c478bd9Sstevel@tonic-gate * If we are of higher or equal priority than 14267c478bd9Sstevel@tonic-gate * the highest priority runnable thread of 14277c478bd9Sstevel@tonic-gate * the current CPU, just pick this CPU. Otherwise 14287c478bd9Sstevel@tonic-gate * Let cpu_choose() select the CPU. If this cpu 14297c478bd9Sstevel@tonic-gate * is the target of an offline request then do not 14307c478bd9Sstevel@tonic-gate * pick it - a thread_nomigrate() on the in motion 14317c478bd9Sstevel@tonic-gate * cpu relies on this when it forces a preempt. 
14327c478bd9Sstevel@tonic-gate */ 14337c478bd9Sstevel@tonic-gate if (tpri < cp->cpu_disp->disp_maxrunpri || 14347c478bd9Sstevel@tonic-gate cp == cpu_inmotion) 14357c478bd9Sstevel@tonic-gate cp = cpu_choose(tp, tpri); 14367c478bd9Sstevel@tonic-gate } else { 14377c478bd9Sstevel@tonic-gate /* 14387c478bd9Sstevel@tonic-gate * Migrate to a cpu in the new partition. 14397c478bd9Sstevel@tonic-gate */ 14407c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist, 14417c478bd9Sstevel@tonic-gate tp->t_lpl, tp->t_pri, NULL); 14427c478bd9Sstevel@tonic-gate } 14437c478bd9Sstevel@tonic-gate bound = 0; 14447c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 14457c478bd9Sstevel@tonic-gate } else { 14467c478bd9Sstevel@tonic-gate /* 14477c478bd9Sstevel@tonic-gate * It is possible that t_weakbound_cpu != t_bound_cpu (for 14487c478bd9Sstevel@tonic-gate * a short time until weak binding that existed when the 14497c478bd9Sstevel@tonic-gate * strong binding was established has dropped) so we must 14507c478bd9Sstevel@tonic-gate * favour weak binding over strong. 14517c478bd9Sstevel@tonic-gate */ 14527c478bd9Sstevel@tonic-gate cp = tp->t_weakbound_cpu ? 14537c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu : tp->t_bound_cpu; 14547c478bd9Sstevel@tonic-gate bound = 1; 14557c478bd9Sstevel@tonic-gate } 1456f2bd4627Sjohansen 1457f2bd4627Sjohansen /* 1458f2bd4627Sjohansen * A thread that is ONPROC may be temporarily placed on the run queue 1459f2bd4627Sjohansen * but then chosen to run again by disp. If the thread we're placing on 1460f2bd4627Sjohansen * the queue is in TS_ONPROC state, don't set its t_waitrq until a 1461f2bd4627Sjohansen * replacement process is actually scheduled in swtch(). In this 1462f2bd4627Sjohansen * situation, curthread is the only thread that could be in the ONPROC 1463f2bd4627Sjohansen * state. 
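	 * In either case the thread's recent CPU usage is updated here via
	 * cpu_update_pct() before it is placed on the queue.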
1464f2bd4627Sjohansen */ 1465f2bd4627Sjohansen if ((tp != curthread) && (tp->t_waitrq == 0)) { 1466f2bd4627Sjohansen hrtime_t curtime; 1467f2bd4627Sjohansen 1468f2bd4627Sjohansen curtime = gethrtime_unscaled(); 1469f2bd4627Sjohansen (void) cpu_update_pct(tp, curtime); 1470f2bd4627Sjohansen tp->t_waitrq = curtime; 1471f2bd4627Sjohansen } else { 1472f2bd4627Sjohansen (void) cpu_update_pct(tp, gethrtime_unscaled()); 1473f2bd4627Sjohansen } 1474f2bd4627Sjohansen 14757c478bd9Sstevel@tonic-gate dp = cp->cpu_disp; 14767c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock); 14777c478bd9Sstevel@tonic-gate 14787c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp); 14797c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 1); 14807c478bd9Sstevel@tonic-gate 14817c478bd9Sstevel@tonic-gate #ifndef NPROBE 14827c478bd9Sstevel@tonic-gate /* Kernel probe */ 14837c478bd9Sstevel@tonic-gate if (tnf_tracing_active) 14847c478bd9Sstevel@tonic-gate tnf_thread_queue(tp, cp, tpri); 14857c478bd9Sstevel@tonic-gate #endif /* NPROBE */ 14867c478bd9Sstevel@tonic-gate 14877c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 14887c478bd9Sstevel@tonic-gate 14897c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set TS_RUN state and lock */ 14907c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp; 14917c478bd9Sstevel@tonic-gate 14927c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 14937c478bd9Sstevel@tonic-gate dp->disp_nrunnable++; 1494685679f7Sakolb if (!bound) 1495685679f7Sakolb dp->disp_steal = 0; 14967c478bd9Sstevel@tonic-gate membar_enter(); 14977c478bd9Sstevel@tonic-gate 14987c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) { 14997c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last != NULL); 15007c478bd9Sstevel@tonic-gate tp->t_link = dq->dq_first; 15017c478bd9Sstevel@tonic-gate dq->dq_first = tp; 15027c478bd9Sstevel@tonic-gate } else { 15037c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 15047c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 15057c478bd9Sstevel@tonic-gate tp->t_link = NULL; 15067c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 15077c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri); 15087c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) { 15097c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri; 15107c478bd9Sstevel@tonic-gate membar_enter(); 15117c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri); 15127c478bd9Sstevel@tonic-gate } 15137c478bd9Sstevel@tonic-gate } 15147c478bd9Sstevel@tonic-gate 15157c478bd9Sstevel@tonic-gate if (!bound && tpri > dp->disp_max_unbound_pri) { 15167c478bd9Sstevel@tonic-gate if (tp == curthread && dp->disp_max_unbound_pri == -1 && 15177c478bd9Sstevel@tonic-gate cp == CPU) { 15187c478bd9Sstevel@tonic-gate /* 15197c478bd9Sstevel@tonic-gate * If there are no other unbound threads on the 15207c478bd9Sstevel@tonic-gate * run queue, don't allow other CPUs to steal 15217c478bd9Sstevel@tonic-gate * this thread while we are in the middle of a 15227c478bd9Sstevel@tonic-gate * context switch. We may just switch to it 15237c478bd9Sstevel@tonic-gate * again right away. CPU_DISP_DONTSTEAL is cleared 15247c478bd9Sstevel@tonic-gate * in swtch and swtch_to. 
15257c478bd9Sstevel@tonic-gate */ 15267c478bd9Sstevel@tonic-gate cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL; 15277c478bd9Sstevel@tonic-gate } 15287c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 15297c478bd9Sstevel@tonic-gate } 15307c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, bound); 15317c478bd9Sstevel@tonic-gate } 15327c478bd9Sstevel@tonic-gate 15337c478bd9Sstevel@tonic-gate /* 15347c478bd9Sstevel@tonic-gate * Put a high-priority unbound thread on the kp queue 15357c478bd9Sstevel@tonic-gate */ 15367c478bd9Sstevel@tonic-gate static void 15377c478bd9Sstevel@tonic-gate setkpdq(kthread_t *tp, int borf) 15387c478bd9Sstevel@tonic-gate { 15397c478bd9Sstevel@tonic-gate dispq_t *dq; 15407c478bd9Sstevel@tonic-gate disp_t *dp; 15417c478bd9Sstevel@tonic-gate cpu_t *cp; 15427c478bd9Sstevel@tonic-gate pri_t tpri; 15437c478bd9Sstevel@tonic-gate 15447c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 15457c478bd9Sstevel@tonic-gate 15467c478bd9Sstevel@tonic-gate dp = &tp->t_cpupart->cp_kp_queue; 15477c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock); 15487c478bd9Sstevel@tonic-gate 15497c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp); 15507c478bd9Sstevel@tonic-gate 15517c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri); 15527c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, borf); 15537c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */ 15547c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp; 15557c478bd9Sstevel@tonic-gate dp->disp_nrunnable++; 15567c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 15577c478bd9Sstevel@tonic-gate 15587c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) { 15597c478bd9Sstevel@tonic-gate if (borf == SETKP_BACK) { 15607c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first != NULL); 15617c478bd9Sstevel@tonic-gate tp->t_link = NULL; 15627c478bd9Sstevel@tonic-gate dq->dq_last->t_link = tp; 15637c478bd9Sstevel@tonic-gate dq->dq_last = tp; 15647c478bd9Sstevel@tonic-gate } else { 15657c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last != NULL); 15667c478bd9Sstevel@tonic-gate tp->t_link = dq->dq_first; 15677c478bd9Sstevel@tonic-gate dq->dq_first = tp; 15687c478bd9Sstevel@tonic-gate } 15697c478bd9Sstevel@tonic-gate } else { 15707c478bd9Sstevel@tonic-gate if (borf == SETKP_BACK) { 15717c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 15727c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 15737c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 15747c478bd9Sstevel@tonic-gate } else { 15757c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL); 15767c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL); 15777c478bd9Sstevel@tonic-gate tp->t_link = NULL; 15787c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp; 15797c478bd9Sstevel@tonic-gate } 15807c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri); 15817c478bd9Sstevel@tonic-gate if (tpri > dp->disp_max_unbound_pri) 15827c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri; 15837c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) { 15847c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri; 15857c478bd9Sstevel@tonic-gate membar_enter(); 15867c478bd9Sstevel@tonic-gate } 15877c478bd9Sstevel@tonic-gate } 15887c478bd9Sstevel@tonic-gate 15897c478bd9Sstevel@tonic-gate cp = tp->t_cpu; 15907c478bd9Sstevel@tonic-gate if (tp->t_cpupart != cp->cpu_part) { 15917c478bd9Sstevel@tonic-gate /* migrate to a cpu in the new partition */ 15927c478bd9Sstevel@tonic-gate cp = 
tp->t_cpupart->cp_cpulist; 15937c478bd9Sstevel@tonic-gate } 15947c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(cp, tp->t_lpl, tp->t_pri, NULL); 15957c478bd9Sstevel@tonic-gate disp_lock_enter_high(&cp->cpu_disp->disp_lock); 15967c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0); 15977c478bd9Sstevel@tonic-gate 15987c478bd9Sstevel@tonic-gate #ifndef NPROBE 15997c478bd9Sstevel@tonic-gate /* Kernel probe */ 16007c478bd9Sstevel@tonic-gate if (tnf_tracing_active) 16017c478bd9Sstevel@tonic-gate tnf_thread_queue(tp, cp, tpri); 16027c478bd9Sstevel@tonic-gate #endif /* NPROBE */ 16037c478bd9Sstevel@tonic-gate 16047c478bd9Sstevel@tonic-gate if (cp->cpu_chosen_level < tpri) 16057c478bd9Sstevel@tonic-gate cp->cpu_chosen_level = tpri; 16067c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri); 16077c478bd9Sstevel@tonic-gate disp_lock_exit_high(&cp->cpu_disp->disp_lock); 16087c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, 0); 16097c478bd9Sstevel@tonic-gate } 16107c478bd9Sstevel@tonic-gate 16117c478bd9Sstevel@tonic-gate /* 16127c478bd9Sstevel@tonic-gate * Remove a thread from the dispatcher queue if it is on it. 16137c478bd9Sstevel@tonic-gate * It is not an error if it is not found but we return whether 16147c478bd9Sstevel@tonic-gate * or not it was found in case the caller wants to check. 16157c478bd9Sstevel@tonic-gate */ 16167c478bd9Sstevel@tonic-gate int 16177c478bd9Sstevel@tonic-gate dispdeq(kthread_t *tp) 16187c478bd9Sstevel@tonic-gate { 16197c478bd9Sstevel@tonic-gate disp_t *dp; 16207c478bd9Sstevel@tonic-gate dispq_t *dq; 16217c478bd9Sstevel@tonic-gate kthread_t *rp; 16227c478bd9Sstevel@tonic-gate kthread_t *trp; 16237c478bd9Sstevel@tonic-gate kthread_t **ptp; 16247c478bd9Sstevel@tonic-gate int tpri; 16257c478bd9Sstevel@tonic-gate 16267c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 16277c478bd9Sstevel@tonic-gate 16287c478bd9Sstevel@tonic-gate if (tp->t_state != TS_RUN) 16297c478bd9Sstevel@tonic-gate return (0); 16307c478bd9Sstevel@tonic-gate 16317c478bd9Sstevel@tonic-gate /* 16327c478bd9Sstevel@tonic-gate * The thread is "swapped" or is on the swap queue and 16337c478bd9Sstevel@tonic-gate * hence no longer on the run queue, so return true. 16347c478bd9Sstevel@tonic-gate */ 16357c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) 16367c478bd9Sstevel@tonic-gate return (1); 16377c478bd9Sstevel@tonic-gate 16387c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 16397c478bd9Sstevel@tonic-gate dp = tp->t_disp_queue; 16407c478bd9Sstevel@tonic-gate ASSERT(tpri < dp->disp_npri); 16417c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri]; 16427c478bd9Sstevel@tonic-gate ptp = &dq->dq_first; 16437c478bd9Sstevel@tonic-gate rp = *ptp; 16447c478bd9Sstevel@tonic-gate trp = NULL; 16457c478bd9Sstevel@tonic-gate 16467c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL); 16477c478bd9Sstevel@tonic-gate 16487c478bd9Sstevel@tonic-gate /* 16497c478bd9Sstevel@tonic-gate * Search for thread in queue. 16507c478bd9Sstevel@tonic-gate * Double links would simplify this at the expense of disp/setrun. 
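	 * The search walks the singly-linked t_link chain from dq_first,
	 * keeping a trailing pointer (trp) so the found thread can be unlinked.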
16517c478bd9Sstevel@tonic-gate */ 16527c478bd9Sstevel@tonic-gate while (rp != tp && rp != NULL) { 16537c478bd9Sstevel@tonic-gate trp = rp; 16547c478bd9Sstevel@tonic-gate ptp = &trp->t_link; 16557c478bd9Sstevel@tonic-gate rp = trp->t_link; 16567c478bd9Sstevel@tonic-gate } 16577c478bd9Sstevel@tonic-gate 16587c478bd9Sstevel@tonic-gate if (rp == NULL) { 16597c478bd9Sstevel@tonic-gate panic("dispdeq: thread not on queue"); 16607c478bd9Sstevel@tonic-gate } 16617c478bd9Sstevel@tonic-gate 16627c478bd9Sstevel@tonic-gate DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp); 16637c478bd9Sstevel@tonic-gate 16647c478bd9Sstevel@tonic-gate /* 16657c478bd9Sstevel@tonic-gate * Found it so remove it from queue. 16667c478bd9Sstevel@tonic-gate */ 16677c478bd9Sstevel@tonic-gate if ((*ptp = rp->t_link) == NULL) 16687c478bd9Sstevel@tonic-gate dq->dq_last = trp; 16697c478bd9Sstevel@tonic-gate 16707c478bd9Sstevel@tonic-gate dp->disp_nrunnable--; 16717c478bd9Sstevel@tonic-gate if (--dq->dq_sruncnt == 0) { 16727c478bd9Sstevel@tonic-gate dp->disp_qactmap[tpri >> BT_ULSHIFT] &= ~BT_BIW(tpri); 16737c478bd9Sstevel@tonic-gate if (dp->disp_nrunnable == 0) { 16747c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = -1; 16757c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = -1; 16767c478bd9Sstevel@tonic-gate } else if (tpri == dp->disp_maxrunpri) { 16777c478bd9Sstevel@tonic-gate int ipri; 16787c478bd9Sstevel@tonic-gate 16797c478bd9Sstevel@tonic-gate ipri = bt_gethighbit(dp->disp_qactmap, 16807c478bd9Sstevel@tonic-gate dp->disp_maxrunpri >> BT_ULSHIFT); 16817c478bd9Sstevel@tonic-gate if (ipri < dp->disp_max_unbound_pri) 16827c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = ipri; 16837c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = ipri; 16847c478bd9Sstevel@tonic-gate } 16857c478bd9Sstevel@tonic-gate } 16867c478bd9Sstevel@tonic-gate tp->t_link = NULL; 16877c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp); /* put in intermediate state */ 16887c478bd9Sstevel@tonic-gate return (1); 16897c478bd9Sstevel@tonic-gate } 16907c478bd9Sstevel@tonic-gate 16917c478bd9Sstevel@tonic-gate 16927c478bd9Sstevel@tonic-gate /* 16937c478bd9Sstevel@tonic-gate * dq_sruninc and dq_srundec are public functions for 16947c478bd9Sstevel@tonic-gate * incrementing/decrementing the sruncnts when a thread on 16957c478bd9Sstevel@tonic-gate * a dispatcher queue is made schedulable/unschedulable by 16967c478bd9Sstevel@tonic-gate * resetting the TS_LOAD flag. 16977c478bd9Sstevel@tonic-gate * 16987c478bd9Sstevel@tonic-gate * The caller MUST have the thread lock and therefore the dispatcher 16997c478bd9Sstevel@tonic-gate * queue lock so that the operation which changes 17007c478bd9Sstevel@tonic-gate * the flag, the operation that checks the status of the thread to 17017c478bd9Sstevel@tonic-gate * determine if it's on a disp queue AND the call to this function 17027c478bd9Sstevel@tonic-gate * are one atomic operation with respect to interrupts. 17037c478bd9Sstevel@tonic-gate */ 17047c478bd9Sstevel@tonic-gate 17057c478bd9Sstevel@tonic-gate /* 17067c478bd9Sstevel@tonic-gate * Called by sched AFTER TS_LOAD flag is set on a swapped, runnable thread. 
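 * The thread is placed back on a dispatch queue via setfrontdq().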
17077c478bd9Sstevel@tonic-gate */ 17087c478bd9Sstevel@tonic-gate void 17097c478bd9Sstevel@tonic-gate dq_sruninc(kthread_t *t) 17107c478bd9Sstevel@tonic-gate { 17117c478bd9Sstevel@tonic-gate ASSERT(t->t_state == TS_RUN); 17127c478bd9Sstevel@tonic-gate ASSERT(t->t_schedflag & TS_LOAD); 17137c478bd9Sstevel@tonic-gate 17147c478bd9Sstevel@tonic-gate THREAD_TRANSITION(t); 17157c478bd9Sstevel@tonic-gate setfrontdq(t); 17167c478bd9Sstevel@tonic-gate } 17177c478bd9Sstevel@tonic-gate 17187c478bd9Sstevel@tonic-gate /* 17197c478bd9Sstevel@tonic-gate * See comment on calling conventions above. 17207c478bd9Sstevel@tonic-gate * Called by sched BEFORE TS_LOAD flag is cleared on a runnable thread. 17217c478bd9Sstevel@tonic-gate */ 17227c478bd9Sstevel@tonic-gate void 17237c478bd9Sstevel@tonic-gate dq_srundec(kthread_t *t) 17247c478bd9Sstevel@tonic-gate { 17257c478bd9Sstevel@tonic-gate ASSERT(t->t_schedflag & TS_LOAD); 17267c478bd9Sstevel@tonic-gate 17277c478bd9Sstevel@tonic-gate (void) dispdeq(t); 17287c478bd9Sstevel@tonic-gate disp_swapped_enq(t); 17297c478bd9Sstevel@tonic-gate } 17307c478bd9Sstevel@tonic-gate 17317c478bd9Sstevel@tonic-gate /* 17327c478bd9Sstevel@tonic-gate * Change the dispatcher lock of thread to the "swapped_lock" 17337c478bd9Sstevel@tonic-gate * and return with thread lock still held. 17347c478bd9Sstevel@tonic-gate * 17357c478bd9Sstevel@tonic-gate * Called with thread_lock held, in transition state, and at high spl. 17367c478bd9Sstevel@tonic-gate */ 17377c478bd9Sstevel@tonic-gate void 17387c478bd9Sstevel@tonic-gate disp_swapped_enq(kthread_t *tp) 17397c478bd9Sstevel@tonic-gate { 17407c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 17417c478bd9Sstevel@tonic-gate ASSERT(tp->t_schedflag & TS_LOAD); 17427c478bd9Sstevel@tonic-gate 17437c478bd9Sstevel@tonic-gate switch (tp->t_state) { 17447c478bd9Sstevel@tonic-gate case TS_RUN: 17457c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock); 17467c478bd9Sstevel@tonic-gate THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */ 17477c478bd9Sstevel@tonic-gate break; 17487c478bd9Sstevel@tonic-gate case TS_ONPROC: 17497c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock); 17507c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp); 17517c478bd9Sstevel@tonic-gate wake_sched_sec = 1; /* tell clock to wake sched */ 17527c478bd9Sstevel@tonic-gate THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */ 17537c478bd9Sstevel@tonic-gate break; 17547c478bd9Sstevel@tonic-gate default: 17557c478bd9Sstevel@tonic-gate panic("disp_swapped: tp: %p bad t_state", (void *)tp); 17567c478bd9Sstevel@tonic-gate } 17577c478bd9Sstevel@tonic-gate } 17587c478bd9Sstevel@tonic-gate 17597c478bd9Sstevel@tonic-gate /* 17607c478bd9Sstevel@tonic-gate * This routine is called by setbackdq/setfrontdq if the thread is 17617c478bd9Sstevel@tonic-gate * not loaded or loaded and on the swap queue. 17627c478bd9Sstevel@tonic-gate * 17637c478bd9Sstevel@tonic-gate * Thread state TS_SLEEP implies that a swapped thread 17647c478bd9Sstevel@tonic-gate * has been woken up and needs to be swapped in by the swapper. 17657c478bd9Sstevel@tonic-gate * 17667c478bd9Sstevel@tonic-gate * Thread state TS_RUN, it implies that the priority of a swapped 17677c478bd9Sstevel@tonic-gate * thread is being increased by scheduling class (e.g. ts_update). 
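 * In the TS_SLEEP case the swapper (sched) is woken up to swap the
 * thread back in; in the TS_RUN case there is nothing more to do here.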
17687c478bd9Sstevel@tonic-gate */ 17697c478bd9Sstevel@tonic-gate static void 17707c478bd9Sstevel@tonic-gate disp_swapped_setrun(kthread_t *tp) 17717c478bd9Sstevel@tonic-gate { 17727c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 17737c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD); 17747c478bd9Sstevel@tonic-gate 17757c478bd9Sstevel@tonic-gate switch (tp->t_state) { 17767c478bd9Sstevel@tonic-gate case TS_SLEEP: 17777c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock); 17787c478bd9Sstevel@tonic-gate /* 17797c478bd9Sstevel@tonic-gate * Wakeup sched immediately (i.e., next tick) if the 17807c478bd9Sstevel@tonic-gate * thread priority is above maxclsyspri. 17817c478bd9Sstevel@tonic-gate */ 17827c478bd9Sstevel@tonic-gate if (DISP_PRIO(tp) > maxclsyspri) 17837c478bd9Sstevel@tonic-gate wake_sched = 1; 17847c478bd9Sstevel@tonic-gate else 17857c478bd9Sstevel@tonic-gate wake_sched_sec = 1; 17867c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &swapped_lock); /* set TS_RUN state and lock */ 17877c478bd9Sstevel@tonic-gate break; 17887c478bd9Sstevel@tonic-gate case TS_RUN: /* called from ts_update */ 17897c478bd9Sstevel@tonic-gate break; 17907c478bd9Sstevel@tonic-gate default: 17917c478bd9Sstevel@tonic-gate panic("disp_swapped_setrun: tp: %p bad t_state", tp); 17927c478bd9Sstevel@tonic-gate } 17937c478bd9Sstevel@tonic-gate } 17947c478bd9Sstevel@tonic-gate 17957c478bd9Sstevel@tonic-gate 17967c478bd9Sstevel@tonic-gate /* 17977c478bd9Sstevel@tonic-gate * Make a thread give up its processor. Find the processor on 17987c478bd9Sstevel@tonic-gate * which this thread is executing, and have that processor 17997c478bd9Sstevel@tonic-gate * preempt. 18007c478bd9Sstevel@tonic-gate */ 18017c478bd9Sstevel@tonic-gate void 18027c478bd9Sstevel@tonic-gate cpu_surrender(kthread_t *tp) 18037c478bd9Sstevel@tonic-gate { 18047c478bd9Sstevel@tonic-gate cpu_t *cpup; 18057c478bd9Sstevel@tonic-gate int max_pri; 18067c478bd9Sstevel@tonic-gate int max_run_pri; 18077c478bd9Sstevel@tonic-gate klwp_t *lwp; 18087c478bd9Sstevel@tonic-gate 18097c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp)); 18107c478bd9Sstevel@tonic-gate 18117c478bd9Sstevel@tonic-gate if (tp->t_state != TS_ONPROC) 18127c478bd9Sstevel@tonic-gate return; 18137c478bd9Sstevel@tonic-gate cpup = tp->t_disp_queue->disp_cpu; /* CPU thread dispatched to */ 18147c478bd9Sstevel@tonic-gate max_pri = cpup->cpu_disp->disp_maxrunpri; /* best pri of that CPU */ 18157c478bd9Sstevel@tonic-gate max_run_pri = CP_MAXRUNPRI(cpup->cpu_part); 18167c478bd9Sstevel@tonic-gate if (max_pri < max_run_pri) 18177c478bd9Sstevel@tonic-gate max_pri = max_run_pri; 18187c478bd9Sstevel@tonic-gate 18197c478bd9Sstevel@tonic-gate cpup->cpu_runrun = 1; 18207c478bd9Sstevel@tonic-gate if (max_pri >= kpreemptpri && cpup->cpu_kprunrun == 0) { 18217c478bd9Sstevel@tonic-gate cpup->cpu_kprunrun = 1; 18227c478bd9Sstevel@tonic-gate } 18237c478bd9Sstevel@tonic-gate 18247c478bd9Sstevel@tonic-gate /* 18257c478bd9Sstevel@tonic-gate * Propagate cpu_runrun, and cpu_kprunrun to global visibility. 
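	 * The membar_enter() below pushes these flag updates out before the
	 * target thread is notified via aston()/poke_cpu().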
18267c478bd9Sstevel@tonic-gate */ 18277c478bd9Sstevel@tonic-gate membar_enter(); 18287c478bd9Sstevel@tonic-gate 18297c478bd9Sstevel@tonic-gate DTRACE_SCHED1(surrender, kthread_t *, tp); 18307c478bd9Sstevel@tonic-gate 18317c478bd9Sstevel@tonic-gate /* 18327c478bd9Sstevel@tonic-gate * Make the target thread take an excursion through trap() 18337c478bd9Sstevel@tonic-gate * to do preempt() (unless we're already in trap or post_syscall, 18347c478bd9Sstevel@tonic-gate * calling cpu_surrender via CL_TRAPRET). 18357c478bd9Sstevel@tonic-gate */ 18367c478bd9Sstevel@tonic-gate if (tp != curthread || (lwp = tp->t_lwp) == NULL || 18377c478bd9Sstevel@tonic-gate lwp->lwp_state != LWP_USER) { 18387c478bd9Sstevel@tonic-gate aston(tp); 18397c478bd9Sstevel@tonic-gate if (cpup != CPU) 18407c478bd9Sstevel@tonic-gate poke_cpu(cpup->cpu_id); 18417c478bd9Sstevel@tonic-gate } 18427c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_CPU_SURRENDER, 18437c478bd9Sstevel@tonic-gate "cpu_surrender:tid %p cpu %p", tp, cpup); 18447c478bd9Sstevel@tonic-gate } 18457c478bd9Sstevel@tonic-gate 18467c478bd9Sstevel@tonic-gate 18477c478bd9Sstevel@tonic-gate /* 18487c478bd9Sstevel@tonic-gate * Commit to and ratify a scheduling decision 18497c478bd9Sstevel@tonic-gate */ 18507c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 18517c478bd9Sstevel@tonic-gate static kthread_t * 18527c478bd9Sstevel@tonic-gate disp_ratify(kthread_t *tp, disp_t *kpq) 18537c478bd9Sstevel@tonic-gate { 18547c478bd9Sstevel@tonic-gate pri_t tpri, maxpri; 18557c478bd9Sstevel@tonic-gate pri_t maxkpri; 18567c478bd9Sstevel@tonic-gate cpu_t *cpup; 18577c478bd9Sstevel@tonic-gate 18587c478bd9Sstevel@tonic-gate ASSERT(tp != NULL); 18597c478bd9Sstevel@tonic-gate /* 18607c478bd9Sstevel@tonic-gate * Commit to, then ratify scheduling decision 18617c478bd9Sstevel@tonic-gate */ 18627c478bd9Sstevel@tonic-gate cpup = CPU; 18637c478bd9Sstevel@tonic-gate if (cpup->cpu_runrun != 0) 18647c478bd9Sstevel@tonic-gate cpup->cpu_runrun = 0; 18657c478bd9Sstevel@tonic-gate if (cpup->cpu_kprunrun != 0) 18667c478bd9Sstevel@tonic-gate cpup->cpu_kprunrun = 0; 18677c478bd9Sstevel@tonic-gate if (cpup->cpu_chosen_level != -1) 18687c478bd9Sstevel@tonic-gate cpup->cpu_chosen_level = -1; 18697c478bd9Sstevel@tonic-gate membar_enter(); 18707c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp); 18717c478bd9Sstevel@tonic-gate maxpri = cpup->cpu_disp->disp_maxrunpri; 18727c478bd9Sstevel@tonic-gate maxkpri = kpq->disp_maxrunpri; 18737c478bd9Sstevel@tonic-gate if (maxpri < maxkpri) 18747c478bd9Sstevel@tonic-gate maxpri = maxkpri; 18757c478bd9Sstevel@tonic-gate if (tpri < maxpri) { 18767c478bd9Sstevel@tonic-gate /* 18777c478bd9Sstevel@tonic-gate * should have done better 18787c478bd9Sstevel@tonic-gate * put this one back and indicate to try again 18797c478bd9Sstevel@tonic-gate */ 18807c478bd9Sstevel@tonic-gate cpup->cpu_dispthread = curthread; /* fixup dispthread */ 18817c478bd9Sstevel@tonic-gate cpup->cpu_dispatch_pri = DISP_PRIO(curthread); 18827c478bd9Sstevel@tonic-gate thread_lock_high(tp); 18837c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp); 18847c478bd9Sstevel@tonic-gate setfrontdq(tp); 18857c478bd9Sstevel@tonic-gate thread_unlock_nopreempt(tp); 18867c478bd9Sstevel@tonic-gate 18877c478bd9Sstevel@tonic-gate tp = NULL; 18887c478bd9Sstevel@tonic-gate } 18897c478bd9Sstevel@tonic-gate return (tp); 18907c478bd9Sstevel@tonic-gate } 18917c478bd9Sstevel@tonic-gate 18927c478bd9Sstevel@tonic-gate /* 18937c478bd9Sstevel@tonic-gate * See if there is any work on the dispatcher queue for other CPUs. 
18947c478bd9Sstevel@tonic-gate * If there is, dequeue the best thread and return. 18957c478bd9Sstevel@tonic-gate */ 18967c478bd9Sstevel@tonic-gate static kthread_t * 18977c478bd9Sstevel@tonic-gate disp_getwork(cpu_t *cp) 18987c478bd9Sstevel@tonic-gate { 18997c478bd9Sstevel@tonic-gate cpu_t *ocp; /* other CPU */ 19007c478bd9Sstevel@tonic-gate cpu_t *ocp_start; 19017c478bd9Sstevel@tonic-gate cpu_t *tcp; /* target local CPU */ 19027c478bd9Sstevel@tonic-gate kthread_t *tp; 1903685679f7Sakolb kthread_t *retval = NULL; 19047c478bd9Sstevel@tonic-gate pri_t maxpri; 19057c478bd9Sstevel@tonic-gate disp_t *kpq; /* kp queue for this partition */ 19067c478bd9Sstevel@tonic-gate lpl_t *lpl, *lpl_leaf; 19077c478bd9Sstevel@tonic-gate int hint, leafidx; 1908685679f7Sakolb hrtime_t stealtime; 19097c478bd9Sstevel@tonic-gate 19107c478bd9Sstevel@tonic-gate maxpri = -1; 19117c478bd9Sstevel@tonic-gate tcp = NULL; 19127c478bd9Sstevel@tonic-gate 19137c478bd9Sstevel@tonic-gate kpq = &cp->cpu_part->cp_kp_queue; 19147c478bd9Sstevel@tonic-gate while (kpq->disp_maxrunpri >= 0) { 19157c478bd9Sstevel@tonic-gate /* 19167c478bd9Sstevel@tonic-gate * Try to take a thread from the kp_queue. 19177c478bd9Sstevel@tonic-gate */ 19187c478bd9Sstevel@tonic-gate tp = (disp_getbest(kpq)); 19197c478bd9Sstevel@tonic-gate if (tp) 19207c478bd9Sstevel@tonic-gate return (disp_ratify(tp, kpq)); 19217c478bd9Sstevel@tonic-gate } 19227c478bd9Sstevel@tonic-gate 1923ab761399Sesaxe kpreempt_disable(); /* protect the cpu_active list */ 19247c478bd9Sstevel@tonic-gate 19257c478bd9Sstevel@tonic-gate /* 19267c478bd9Sstevel@tonic-gate * Try to find something to do on another CPU's run queue. 19277c478bd9Sstevel@tonic-gate * Loop through all other CPUs looking for the one with the highest 19287c478bd9Sstevel@tonic-gate * priority unbound thread. 19297c478bd9Sstevel@tonic-gate * 19307c478bd9Sstevel@tonic-gate * On NUMA machines, the partition's CPUs are consulted in order of 19317c478bd9Sstevel@tonic-gate * distance from the current CPU. This way, the first available 19327c478bd9Sstevel@tonic-gate * work found is also the closest, and will suffer the least 19337c478bd9Sstevel@tonic-gate * from being migrated. 19347c478bd9Sstevel@tonic-gate */ 19357c478bd9Sstevel@tonic-gate lpl = lpl_leaf = cp->cpu_lpl; 19367c478bd9Sstevel@tonic-gate hint = leafidx = 0; 19377c478bd9Sstevel@tonic-gate 19387c478bd9Sstevel@tonic-gate /* 19397c478bd9Sstevel@tonic-gate * This loop traverses the lpl hierarchy. 
Higher level lpls represent 19407c478bd9Sstevel@tonic-gate * broader levels of locality 19417c478bd9Sstevel@tonic-gate */ 19427c478bd9Sstevel@tonic-gate do { 19437c478bd9Sstevel@tonic-gate /* This loop iterates over the lpl's leaves */ 19447c478bd9Sstevel@tonic-gate do { 19457c478bd9Sstevel@tonic-gate if (lpl_leaf != cp->cpu_lpl) 19467c478bd9Sstevel@tonic-gate ocp = lpl_leaf->lpl_cpus; 19477c478bd9Sstevel@tonic-gate else 19487c478bd9Sstevel@tonic-gate ocp = cp->cpu_next_lpl; 19497c478bd9Sstevel@tonic-gate 19507c478bd9Sstevel@tonic-gate /* This loop iterates over the CPUs in the leaf */ 19517c478bd9Sstevel@tonic-gate ocp_start = ocp; 19527c478bd9Sstevel@tonic-gate do { 19537c478bd9Sstevel@tonic-gate pri_t pri; 19547c478bd9Sstevel@tonic-gate 19557c478bd9Sstevel@tonic-gate ASSERT(CPU_ACTIVE(ocp)); 19567c478bd9Sstevel@tonic-gate 19577c478bd9Sstevel@tonic-gate /* 195839bac370Sesaxe * End our stroll around this lpl if: 19597c478bd9Sstevel@tonic-gate * 19607c478bd9Sstevel@tonic-gate * - Something became runnable on the local 196139bac370Sesaxe * queue...which also ends our stroll around 196239bac370Sesaxe * the partition. 19637c478bd9Sstevel@tonic-gate * 196439bac370Sesaxe * - We happen across another idle CPU. 196539bac370Sesaxe * Since it is patrolling the next portion 196639bac370Sesaxe * of the lpl's list (assuming it's not 196739bac370Sesaxe * halted), move to the next higher level 196839bac370Sesaxe * of locality. 19697c478bd9Sstevel@tonic-gate */ 197039bac370Sesaxe if (cp->cpu_disp->disp_nrunnable != 0) { 197139bac370Sesaxe kpreempt_enable(); 197239bac370Sesaxe return (NULL); 197339bac370Sesaxe } 19747c478bd9Sstevel@tonic-gate if (ocp->cpu_dispatch_pri == -1) { 19757c478bd9Sstevel@tonic-gate if (ocp->cpu_disp_flags & 19767c478bd9Sstevel@tonic-gate CPU_DISP_HALTED) 19777c478bd9Sstevel@tonic-gate continue; 197839bac370Sesaxe else 19797c478bd9Sstevel@tonic-gate break; 19807c478bd9Sstevel@tonic-gate } 19817c478bd9Sstevel@tonic-gate 19827c478bd9Sstevel@tonic-gate /* 19837c478bd9Sstevel@tonic-gate * If there's only one thread and the CPU 19847c478bd9Sstevel@tonic-gate * is in the middle of a context switch, 19857c478bd9Sstevel@tonic-gate * or it's currently running the idle thread, 19867c478bd9Sstevel@tonic-gate * don't steal it. 19877c478bd9Sstevel@tonic-gate */ 19887c478bd9Sstevel@tonic-gate if ((ocp->cpu_disp_flags & 19897c478bd9Sstevel@tonic-gate CPU_DISP_DONTSTEAL) && 19907c478bd9Sstevel@tonic-gate ocp->cpu_disp->disp_nrunnable == 1) 19917c478bd9Sstevel@tonic-gate continue; 19927c478bd9Sstevel@tonic-gate 19937c478bd9Sstevel@tonic-gate pri = ocp->cpu_disp->disp_max_unbound_pri; 19947c478bd9Sstevel@tonic-gate if (pri > maxpri) { 1995685679f7Sakolb /* 1996685679f7Sakolb * Don't steal threads that we attempted 1997fb2f18f8Sesaxe * to steal recently until they're ready 1998fb2f18f8Sesaxe * to be stolen again. 1999685679f7Sakolb */ 2000685679f7Sakolb stealtime = ocp->cpu_disp->disp_steal; 2001685679f7Sakolb if (stealtime == 0 || 2002685679f7Sakolb stealtime - gethrtime() <= 0) { 20037c478bd9Sstevel@tonic-gate maxpri = pri; 20047c478bd9Sstevel@tonic-gate tcp = ocp; 2005685679f7Sakolb } else { 2006685679f7Sakolb /* 2007685679f7Sakolb * Don't update tcp, just set 2008685679f7Sakolb * the retval to T_DONTSTEAL, so 2009685679f7Sakolb * that if no acceptable CPUs 2010685679f7Sakolb * are found the return value 2011685679f7Sakolb * will be T_DONTSTEAL rather 2012685679f7Sakolb * then NULL. 
2013685679f7Sakolb */ 2014685679f7Sakolb retval = T_DONTSTEAL; 2015685679f7Sakolb } 20167c478bd9Sstevel@tonic-gate } 20177c478bd9Sstevel@tonic-gate } while ((ocp = ocp->cpu_next_lpl) != ocp_start); 20187c478bd9Sstevel@tonic-gate 20197c478bd9Sstevel@tonic-gate if ((lpl_leaf = lpl->lpl_rset[++leafidx]) == NULL) { 20207c478bd9Sstevel@tonic-gate leafidx = 0; 20217c478bd9Sstevel@tonic-gate lpl_leaf = lpl->lpl_rset[leafidx]; 20227c478bd9Sstevel@tonic-gate } 20237c478bd9Sstevel@tonic-gate } while (leafidx != hint); 20247c478bd9Sstevel@tonic-gate 20257c478bd9Sstevel@tonic-gate hint = leafidx = lpl->lpl_hint; 20267c478bd9Sstevel@tonic-gate if ((lpl = lpl->lpl_parent) != NULL) 20277c478bd9Sstevel@tonic-gate lpl_leaf = lpl->lpl_rset[hint]; 20287c478bd9Sstevel@tonic-gate } while (!tcp && lpl); 20297c478bd9Sstevel@tonic-gate 2030ab761399Sesaxe kpreempt_enable(); 20317c478bd9Sstevel@tonic-gate 20327c478bd9Sstevel@tonic-gate /* 20337c478bd9Sstevel@tonic-gate * If another queue looks good, and there is still nothing on 20347c478bd9Sstevel@tonic-gate * the local queue, try to transfer one or more threads 20357c478bd9Sstevel@tonic-gate * from it to our queue. 20367c478bd9Sstevel@tonic-gate */ 20377c478bd9Sstevel@tonic-gate if (tcp && cp->cpu_disp->disp_nrunnable == 0) { 2038685679f7Sakolb tp = disp_getbest(tcp->cpu_disp); 2039685679f7Sakolb if (tp == NULL || tp == T_DONTSTEAL) 2040685679f7Sakolb return (tp); 20417c478bd9Sstevel@tonic-gate return (disp_ratify(tp, kpq)); 20427c478bd9Sstevel@tonic-gate } 2043685679f7Sakolb return (retval); 20447c478bd9Sstevel@tonic-gate } 20457c478bd9Sstevel@tonic-gate 20467c478bd9Sstevel@tonic-gate 20477c478bd9Sstevel@tonic-gate /* 20487c478bd9Sstevel@tonic-gate * disp_fix_unbound_pri() 20497c478bd9Sstevel@tonic-gate * Determines the maximum priority of unbound threads on the queue. 20507c478bd9Sstevel@tonic-gate * The priority is kept for the queue, but is only increased, never 20517c478bd9Sstevel@tonic-gate * reduced unless some CPU is looking for something on that queue. 20527c478bd9Sstevel@tonic-gate * 20537c478bd9Sstevel@tonic-gate * The priority argument is the known upper limit. 20547c478bd9Sstevel@tonic-gate * 20557c478bd9Sstevel@tonic-gate * Perhaps this should be kept accurately, but that probably means 20567c478bd9Sstevel@tonic-gate * separate bitmaps for bound and unbound threads. Since only idled 20577c478bd9Sstevel@tonic-gate * CPUs will have to do this recalculation, it seems better this way. 20587c478bd9Sstevel@tonic-gate */ 20597c478bd9Sstevel@tonic-gate static void 20607c478bd9Sstevel@tonic-gate disp_fix_unbound_pri(disp_t *dp, pri_t pri) 20617c478bd9Sstevel@tonic-gate { 20627c478bd9Sstevel@tonic-gate kthread_t *tp; 20637c478bd9Sstevel@tonic-gate dispq_t *dq; 20647c478bd9Sstevel@tonic-gate ulong_t *dqactmap = dp->disp_qactmap; 20657c478bd9Sstevel@tonic-gate ulong_t mapword; 20667c478bd9Sstevel@tonic-gate int wx; 20677c478bd9Sstevel@tonic-gate 20687c478bd9Sstevel@tonic-gate ASSERT(DISP_LOCK_HELD(&dp->disp_lock)); 20697c478bd9Sstevel@tonic-gate 20707c478bd9Sstevel@tonic-gate ASSERT(pri >= 0); /* checked by caller */ 20717c478bd9Sstevel@tonic-gate 20727c478bd9Sstevel@tonic-gate /* 20737c478bd9Sstevel@tonic-gate * Start the search at the next lowest priority below the supplied 20747c478bd9Sstevel@tonic-gate * priority. This depends on the bitmap implementation. 
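	 * Each pass moves down to the next lower active priority in the bitmap
	 * and scans that queue for an unbound thread; the loop ends when one is
	 * found or the bitmap is exhausted (pri becomes -1).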
20757c478bd9Sstevel@tonic-gate	 */
20767c478bd9Sstevel@tonic-gate	do {
20777c478bd9Sstevel@tonic-gate		wx = pri >> BT_ULSHIFT;	/* index of word in map */
20787c478bd9Sstevel@tonic-gate
20797c478bd9Sstevel@tonic-gate		/*
20807c478bd9Sstevel@tonic-gate		 * Form mask for all lower priorities in the word.
20817c478bd9Sstevel@tonic-gate		 */
20827c478bd9Sstevel@tonic-gate		mapword = dqactmap[wx] & (BT_BIW(pri) - 1);
20837c478bd9Sstevel@tonic-gate
20847c478bd9Sstevel@tonic-gate		/*
20857c478bd9Sstevel@tonic-gate		 * Get next lower active priority.
20867c478bd9Sstevel@tonic-gate		 */
20877c478bd9Sstevel@tonic-gate		if (mapword != 0) {
20887c478bd9Sstevel@tonic-gate			pri = (wx << BT_ULSHIFT) + highbit(mapword) - 1;
20897c478bd9Sstevel@tonic-gate		} else if (wx > 0) {
20907c478bd9Sstevel@tonic-gate			pri = bt_gethighbit(dqactmap, wx - 1); /* sign extend */
20917c478bd9Sstevel@tonic-gate			if (pri < 0)
20927c478bd9Sstevel@tonic-gate				break;
20937c478bd9Sstevel@tonic-gate		} else {
20947c478bd9Sstevel@tonic-gate			pri = -1;
20957c478bd9Sstevel@tonic-gate			break;
20967c478bd9Sstevel@tonic-gate		}
20977c478bd9Sstevel@tonic-gate
20987c478bd9Sstevel@tonic-gate		/*
20997c478bd9Sstevel@tonic-gate		 * Search the queue for unbound, runnable threads.
21007c478bd9Sstevel@tonic-gate		 */
21017c478bd9Sstevel@tonic-gate		dq = &dp->disp_q[pri];
21027c478bd9Sstevel@tonic-gate		tp = dq->dq_first;
21037c478bd9Sstevel@tonic-gate
21047c478bd9Sstevel@tonic-gate		while (tp && (tp->t_bound_cpu || tp->t_weakbound_cpu)) {
21057c478bd9Sstevel@tonic-gate			tp = tp->t_link;
21067c478bd9Sstevel@tonic-gate		}
21077c478bd9Sstevel@tonic-gate
21087c478bd9Sstevel@tonic-gate		/*
21097c478bd9Sstevel@tonic-gate		 * If a thread was found, set the priority and return.
21107c478bd9Sstevel@tonic-gate		 */
21117c478bd9Sstevel@tonic-gate	} while (tp == NULL);
21127c478bd9Sstevel@tonic-gate
21137c478bd9Sstevel@tonic-gate	/*
21147c478bd9Sstevel@tonic-gate	 * pri holds the maximum unbound thread priority or -1.
21157c478bd9Sstevel@tonic-gate	 */
21167c478bd9Sstevel@tonic-gate	if (dp->disp_max_unbound_pri != pri)
21177c478bd9Sstevel@tonic-gate		dp->disp_max_unbound_pri = pri;
21187c478bd9Sstevel@tonic-gate }
21197c478bd9Sstevel@tonic-gate
21207c478bd9Sstevel@tonic-gate /*
21217c478bd9Sstevel@tonic-gate  * disp_adjust_unbound_pri() - thread is becoming unbound, so we should
21227c478bd9Sstevel@tonic-gate  * check if the CPU to which it was previously bound should have
21237c478bd9Sstevel@tonic-gate  * its disp_max_unbound_pri increased.
21247c478bd9Sstevel@tonic-gate  */
21257c478bd9Sstevel@tonic-gate void
21267c478bd9Sstevel@tonic-gate disp_adjust_unbound_pri(kthread_t *tp)
21277c478bd9Sstevel@tonic-gate {
21287c478bd9Sstevel@tonic-gate	disp_t *dp;
21297c478bd9Sstevel@tonic-gate	pri_t tpri;
21307c478bd9Sstevel@tonic-gate
21317c478bd9Sstevel@tonic-gate	ASSERT(THREAD_LOCK_HELD(tp));
21327c478bd9Sstevel@tonic-gate
21337c478bd9Sstevel@tonic-gate	/*
21347c478bd9Sstevel@tonic-gate	 * Don't do anything if the thread is not bound, or
21357c478bd9Sstevel@tonic-gate	 * currently not runnable or swapped out.
21367c478bd9Sstevel@tonic-gate	 */
21377c478bd9Sstevel@tonic-gate	if (tp->t_bound_cpu == NULL ||
21387c478bd9Sstevel@tonic-gate	    tp->t_state != TS_RUN ||
21397c478bd9Sstevel@tonic-gate	    tp->t_schedflag & TS_ON_SWAPQ)
21407c478bd9Sstevel@tonic-gate		return;
21417c478bd9Sstevel@tonic-gate
21427c478bd9Sstevel@tonic-gate	tpri = DISP_PRIO(tp);
21437c478bd9Sstevel@tonic-gate	dp = tp->t_bound_cpu->cpu_disp;
21447c478bd9Sstevel@tonic-gate	ASSERT(tpri >= 0 && tpri < dp->disp_npri);
21457c478bd9Sstevel@tonic-gate	if (tpri > dp->disp_max_unbound_pri)
21467c478bd9Sstevel@tonic-gate		dp->disp_max_unbound_pri = tpri;
21477c478bd9Sstevel@tonic-gate }
21487c478bd9Sstevel@tonic-gate
21497c478bd9Sstevel@tonic-gate /*
2150685679f7Sakolb * disp_getbest()
2151685679f7Sakolb * De-queue the highest priority unbound runnable thread.
2152685679f7Sakolb * Returns with the thread unlocked and onproc but at splhigh (like disp()).
2153685679f7Sakolb * Returns NULL if nothing found.
2154685679f7Sakolb * Returns T_DONTSTEAL if the thread was not stealable,
2155685679f7Sakolb * so that the caller will try again later.
21567c478bd9Sstevel@tonic-gate *
2157685679f7Sakolb * Passed a pointer to a dispatch queue that is not associated with
2158685679f7Sakolb * this CPU.
21597c478bd9Sstevel@tonic-gate */
21607c478bd9Sstevel@tonic-gate static kthread_t *
21617c478bd9Sstevel@tonic-gate disp_getbest(disp_t *dp)
21627c478bd9Sstevel@tonic-gate {
21637c478bd9Sstevel@tonic-gate	kthread_t *tp;
21647c478bd9Sstevel@tonic-gate	dispq_t *dq;
21657c478bd9Sstevel@tonic-gate	pri_t pri;
2166685679f7Sakolb	cpu_t *cp, *tcp;
2167685679f7Sakolb	boolean_t allbound;
21687c478bd9Sstevel@tonic-gate
21697c478bd9Sstevel@tonic-gate	disp_lock_enter(&dp->disp_lock);
21707c478bd9Sstevel@tonic-gate
21717c478bd9Sstevel@tonic-gate	/*
21727c478bd9Sstevel@tonic-gate	 * If there is nothing to run, or the CPU is in the middle of a
21737c478bd9Sstevel@tonic-gate	 * context switch of the only thread, return NULL.
21747c478bd9Sstevel@tonic-gate	 */
2175685679f7Sakolb	tcp = dp->disp_cpu;
2176685679f7Sakolb	cp = CPU;
21777c478bd9Sstevel@tonic-gate	pri = dp->disp_max_unbound_pri;
21787c478bd9Sstevel@tonic-gate	if (pri == -1 ||
2179685679f7Sakolb	    (tcp != NULL && (tcp->cpu_disp_flags & CPU_DISP_DONTSTEAL) &&
2180685679f7Sakolb	    tcp->cpu_disp->disp_nrunnable == 1)) {
21817c478bd9Sstevel@tonic-gate		disp_lock_exit_nopreempt(&dp->disp_lock);
21827c478bd9Sstevel@tonic-gate		return (NULL);
21837c478bd9Sstevel@tonic-gate	}
21847c478bd9Sstevel@tonic-gate
21857c478bd9Sstevel@tonic-gate	dq = &dp->disp_q[pri];
2186685679f7Sakolb
21877c478bd9Sstevel@tonic-gate
21887c478bd9Sstevel@tonic-gate	/*
2189685679f7Sakolb	 * Assume that all threads are bound on this queue, and change it
2190685679f7Sakolb	 * later when we find out that it is not the case.
21917c478bd9Sstevel@tonic-gate	 */
2192685679f7Sakolb	allbound = B_TRUE;
2193685679f7Sakolb	for (tp = dq->dq_first; tp != NULL; tp = tp->t_link) {
2194685679f7Sakolb		hrtime_t now, nosteal, rqtime;
2195685679f7Sakolb
2196685679f7Sakolb		/*
2197685679f7Sakolb		 * Skip over bound threads which could be here even
2198685679f7Sakolb		 * though disp_max_unbound_pri indicated this level.
2199685679f7Sakolb		 */
2200685679f7Sakolb		if (tp->t_bound_cpu || tp->t_weakbound_cpu)
2201685679f7Sakolb			continue;
2202685679f7Sakolb
2203685679f7Sakolb		/*
2204685679f7Sakolb		 * We've got some unbound threads on this queue, so turn
2205685679f7Sakolb		 * the allbound flag off now.
/*
 * disp_getbest()
 *   De-queue the highest priority unbound runnable thread.
 *   Returns with the thread unlocked and onproc but at splhigh (like disp()).
 *   Returns NULL if nothing found.
 *   Returns T_DONTSTEAL if the thread was not stealable,
 *   so that the caller will try again later.
 *
 *   Passed a pointer to a dispatch queue not associated with this CPU.
 */
static kthread_t *
disp_getbest(disp_t *dp)
{
	kthread_t	*tp;
	dispq_t		*dq;
	pri_t		pri;
	cpu_t		*cp, *tcp;
	boolean_t	allbound;

	disp_lock_enter(&dp->disp_lock);

	/*
	 * If there is nothing to run, or the CPU is in the middle of a
	 * context switch of the only thread, return NULL.
	 */
	tcp = dp->disp_cpu;
	cp = CPU;
	pri = dp->disp_max_unbound_pri;
	if (pri == -1 ||
	    (tcp != NULL && (tcp->cpu_disp_flags & CPU_DISP_DONTSTEAL) &&
	    tcp->cpu_disp->disp_nrunnable == 1)) {
		disp_lock_exit_nopreempt(&dp->disp_lock);
		return (NULL);
	}

	dq = &dp->disp_q[pri];

	/*
	 * Assume that all threads are bound on this queue, and change it
	 * later when we find out that it is not the case.
	 */
	allbound = B_TRUE;
	for (tp = dq->dq_first; tp != NULL; tp = tp->t_link) {
		hrtime_t now, nosteal, rqtime;

		/*
		 * Skip over bound threads which could be here even
		 * though disp_max_unbound_pri indicated this level.
		 */
		if (tp->t_bound_cpu || tp->t_weakbound_cpu)
			continue;

		/*
		 * We've got some unbound threads on this queue, so turn
		 * the allbound flag off now.
		 */
		allbound = B_FALSE;

		/*
		 * The thread is a candidate for stealing from its run queue.
		 * We don't want to steal threads that became runnable just a
		 * moment ago.  This improves CPU affinity for threads that get
		 * preempted for short periods of time and go back on the run
		 * queue.
		 *
		 * We want to let it stay on its run queue if it was only placed
		 * there recently and it was running on the same CPU before that
		 * to preserve its cache investment.  For the thread to remain
		 * on its run queue, ALL of the following conditions must be
		 * satisfied:
		 *
		 * - the disp queue should not be the kernel preemption queue
		 * - delayed idle stealing should not be disabled
		 * - nosteal_nsec should be non-zero
		 * - it should run with user priority
		 * - it should be on the run queue of the CPU where it was
		 *   running before being placed on the run queue
		 * - it should be the only thread on the run queue (to prevent
		 *   extra scheduling latency for other threads)
		 * - it should sit on the run queue for less than per-chip
		 *   nosteal interval or global nosteal interval
		 * - in case of CPUs with shared cache it should sit in a run
		 *   queue of a CPU from a different chip
		 *
		 * The checks are arranged so that the ones that are faster are
		 * placed earlier.
		 */
		if (tcp == NULL ||
		    pri >= minclsyspri ||
		    tp->t_cpu != tcp)
			break;

		/*
		 * Steal immediately if, due to the CMT processor architecture,
		 * migration between cp and tcp would incur no performance
		 * penalty.
		 */
		if (pg_cmt_can_migrate(cp, tcp))
			break;

		nosteal = nosteal_nsec;
		if (nosteal == 0)
			break;

		/*
		 * Calculate time spent sitting on run queue
		 */
		now = gethrtime_unscaled();
		rqtime = now - tp->t_waitrq;
		scalehrtime(&rqtime);

		/*
		 * Steal immediately if the time spent on this run queue is more
		 * than allowed nosteal delay.
		 *
		 * Negative rqtime check is needed here to avoid infinite
		 * stealing delays caused by unlikely but not impossible
		 * drifts between CPU times on different CPUs.
		 */
		if (rqtime > nosteal || rqtime < 0)
			break;

		DTRACE_PROBE4(nosteal, kthread_t *, tp,
		    cpu_t *, tcp, cpu_t *, cp, hrtime_t, rqtime);
		scalehrtime(&now);

		/*
		 * Calculate when this thread becomes stealable
		 */
		now += (nosteal - rqtime);

		/*
		 * Calculate time when some thread becomes stealable
		 */
		if (now < dp->disp_steal)
			dp->disp_steal = now;
	}

	/*
	 * If there were no unbound threads on this queue, find the queue
	 * where they are and then return later. The value of
	 * disp_max_unbound_pri is not always accurate because it isn't
	 * reduced until another idle CPU looks for work.
	 */
	if (allbound)
		disp_fix_unbound_pri(dp, pri);

	/*
	 * If we reached the end of the queue and found no unbound threads
	 * then return NULL so that other CPUs will be considered.  If there
	 * are unbound threads but they cannot yet be stolen, then
	 * return T_DONTSTEAL and try again later.
	 */
	if (tp == NULL) {
		disp_lock_exit_nopreempt(&dp->disp_lock);
		return (allbound ? NULL : T_DONTSTEAL);
	}

	/*
	 * Found a runnable, unbound thread, so remove it from queue.
	 * dispdeq() requires that we have the thread locked, and we do,
	 * by virtue of holding the dispatch queue lock.  dispdeq() will
	 * put the thread in transition state, thereby dropping the dispq
	 * lock.
	 */

#ifdef DEBUG
	{
		int	thread_was_on_queue;

		thread_was_on_queue = dispdeq(tp);	/* drops disp_lock */
		ASSERT(thread_was_on_queue);
	}

#else /* DEBUG */
	(void) dispdeq(tp);			/* drops disp_lock */
#endif /* DEBUG */

	/*
	 * Reset the disp_queue steal time - we do not know what the smallest
	 * value across the queue is.
	 */
	dp->disp_steal = 0;

	tp->t_schedflag |= TS_DONT_SWAP;

	/*
	 * Setup thread to run on the current CPU.
	 */
	tp->t_disp_queue = cp->cpu_disp;

	cp->cpu_dispthread = tp;		/* protected by spl only */
	cp->cpu_dispatch_pri = pri;
	ASSERT(pri == DISP_PRIO(tp));

	DTRACE_PROBE3(steal, kthread_t *, tp, cpu_t *, tcp, cpu_t *, cp);

	thread_onproc(tp, cp);			/* set t_state to TS_ONPROC */

	/*
	 * Return with spl high so that swtch() won't need to raise it.
	 * The disp_lock was dropped by dispdeq().
	 */

	return (tp);
}

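/*
 * The stealing policy above, reduced to an informal sketch (this restates
 * the code, it does not add logic; the numbers are made up):
 *
 *	rqtime = gethrtime_unscaled() - tp->t_waitrq;	(scaled to ns)
 *	steal the thread when
 *	    tcp == NULL			(queue has no owning CPU), or
 *	    pri >= minclsyspri		(not a user priority), or
 *	    tp->t_cpu != tcp		(no warm cache to preserve), or
 *	    pg_cmt_can_migrate(cp, tcp)	(migration is free under CMT), or
 *	    nosteal_nsec == 0		(delayed stealing disabled), or
 *	    rqtime > nosteal_nsec	(waited long enough), or
 *	    rqtime < 0			(clock drift, fail safe);
 *	otherwise leave it queued and record in dp->disp_steal the earliest
 *	time at which it would become stealable.
 *
 * E.g. with a hypothetical nosteal_nsec of 100000 ns, a thread preempted
 * 40000 ns ago on tcp stays put; an idle CPU that saw T_DONTSTEAL and retries
 * can take it roughly 60000 ns later if tcp has not resumed it by then.
 */
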
/*
 * disp_bound_common() - common routine for higher level functions
 *	that check for bound threads under certain conditions.
 *	If 'threadlistsafe' is set then there is no need to acquire
 *	pidlock to stop the thread list from changing (e.g., if
 *	disp_bound_* is called with cpus paused).
 */
static int
disp_bound_common(cpu_t *cp, int threadlistsafe, int flag)
{
	int		found = 0;
	kthread_t	*tp;

	ASSERT(flag);

	if (!threadlistsafe)
		mutex_enter(&pidlock);
	tp = curthread;		/* faster than allthreads */
	do {
		if (tp->t_state != TS_FREE) {
			/*
			 * If an interrupt thread is busy, but the
			 * caller doesn't care (i.e. BOUND_INTR is off),
			 * then just ignore it and continue through.
			 */
			if ((tp->t_flag & T_INTR_THREAD) &&
			    !(flag & BOUND_INTR))
				continue;

			/*
			 * Skip the idle thread for the CPU
			 * we're about to set offline.
			 */
			if (tp == cp->cpu_idle_thread)
				continue;

			/*
			 * Skip the pause thread for the CPU
			 * we're about to set offline.
			 */
			if (tp == cp->cpu_pause_thread)
				continue;

			if ((flag & BOUND_CPU) &&
			    (tp->t_bound_cpu == cp ||
			    tp->t_bind_cpu == cp->cpu_id ||
			    tp->t_weakbound_cpu == cp)) {
				found = 1;
				break;
			}

			if ((flag & BOUND_PARTITION) &&
			    (tp->t_cpupart == cp->cpu_part)) {
				found = 1;
				break;
			}
		}
	} while ((tp = tp->t_next) != curthread && found == 0);
	if (!threadlistsafe)
		mutex_exit(&pidlock);
	return (found);
}

/*
 * disp_bound_threads - return nonzero if threads are bound to the processor.
 *	Called infrequently.  Keep this simple.
 *	Includes threads that are asleep or stopped but not onproc.
 */
int
disp_bound_threads(cpu_t *cp, int threadlistsafe)
{
	return (disp_bound_common(cp, threadlistsafe, BOUND_CPU));
}

/*
 * disp_bound_anythreads - return nonzero if _any_ threads are bound
 * to the given processor, including interrupt threads.
 */
int
disp_bound_anythreads(cpu_t *cp, int threadlistsafe)
{
	return (disp_bound_common(cp, threadlistsafe, BOUND_CPU | BOUND_INTR));
}

/*
 * disp_bound_partition - return nonzero if threads are bound to the same
 * partition as the processor.
 *	Called infrequently.  Keep this simple.
 *	Includes threads that are asleep or stopped but not onproc.
 */
int
disp_bound_partition(cpu_t *cp, int threadlistsafe)
{
	return (disp_bound_common(cp, threadlistsafe, BOUND_PARTITION));
}

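/*
 * A hypothetical caller, for illustration only (the real callers live in the
 * CPU and partition management code, not in this file): before taking a CPU
 * offline one would typically refuse to proceed while any thread remains
 * bound to it, e.g.
 *
 *	if (disp_bound_anythreads(cp, 0))
 *		return (EBUSY);		(bound threads, cannot offline)
 *
 * and a partition reconfiguration would make the analogous check with
 * disp_bound_partition(cp, 0).
 */
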
/*
 * disp_cpu_inactive - make a CPU inactive by moving all of its unbound
 * threads to other CPUs.
 */
void
disp_cpu_inactive(cpu_t *cp)
{
	kthread_t	*tp;
	disp_t		*dp = cp->cpu_disp;
	dispq_t		*dq;
	pri_t		pri;
	int		wasonq;

	disp_lock_enter(&dp->disp_lock);
	while ((pri = dp->disp_max_unbound_pri) != -1) {
		dq = &dp->disp_q[pri];
		tp = dq->dq_first;

		/*
		 * Skip over bound threads.
		 */
		while (tp != NULL && tp->t_bound_cpu != NULL) {
			tp = tp->t_link;
		}

		if (tp == NULL) {
			/* disp_max_unbound_pri must be inaccurate, so fix it */
			disp_fix_unbound_pri(dp, pri);
			continue;
		}

		wasonq = dispdeq(tp);		/* drops disp_lock */
		ASSERT(wasonq);
		ASSERT(tp->t_weakbound_cpu == NULL);

		setbackdq(tp);
		/*
		 * Called from cpu_offline:
		 *
		 * cp has already been removed from the list of active cpus
		 * and tp->t_cpu has been changed so there is no risk of
		 * tp ending up back on cp.
		 *
		 * Called from cpupart_move_cpu:
		 *
		 * The cpu has moved to a new cpupart.  Any threads that
		 * were on its dispatch queues before the move remain
		 * in the old partition and can't run in the new partition.
		 */
		ASSERT(tp->t_cpu != cp);
		thread_unlock(tp);

		disp_lock_enter(&dp->disp_lock);
	}
	disp_lock_exit(&dp->disp_lock);
}

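/*
 * A note on why the loop above terminates and why disp_lock is cycled:
 * each pass either dequeues one unbound thread (dispdeq() drops disp_lock
 * but leaves the thread locked, setbackdq() re-queues it on some other CPU,
 * and thread_unlock() then releases it), or finds only bound threads at the
 * reported priority and lowers disp_max_unbound_pri via
 * disp_fix_unbound_pri().  Since cp is no longer a home for new unbound
 * work (see the comment in the loop), the unbound backlog on its queue only
 * shrinks, and the loop exits once disp_max_unbound_pri reaches -1.
 */
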
/*
 * disp_lowpri_cpu - find CPU running the lowest priority thread.
 *	The hint passed in is used as a starting point so we don't favor
 *	CPU 0 or any other CPU.  The caller should pass in the most recently
 *	used CPU for the thread.
 *
 *	The lgroup and priority are used to determine the best CPU to run on
 *	in a NUMA machine.  The lgroup specifies which CPUs are closest while
 *	the thread priority will indicate whether the thread will actually run
 *	there.  To pick the best CPU, the CPUs inside and outside of the given
 *	lgroup which are running the lowest priority threads are found.  The
 *	remote CPU is chosen only if the thread will not run locally on a CPU
 *	within the lgroup, but will run on the remote CPU.  If the thread
 *	cannot immediately run on any CPU, the best local CPU will be chosen.
 *
 *	The lpl specified also identifies the cpu partition from which
 *	disp_lowpri_cpu should select a CPU.
 *
 *	curcpu is used to indicate that disp_lowpri_cpu is being called on
 *	behalf of the current thread. (curthread is looking for a new cpu)
 *	In this case, cpu_dispatch_pri for this thread's cpu should be
 *	ignored.
 *
 *	If a cpu is the target of an offline request then try to avoid it.
 *
 *	This function must be called at either high SPL, or with preemption
 *	disabled, so that the "hint" CPU cannot be removed from the online
 *	CPU list while we are traversing it.
 */
cpu_t *
disp_lowpri_cpu(cpu_t *hint, lpl_t *lpl, pri_t tpri, cpu_t *curcpu)
{
	cpu_t	*bestcpu;
	cpu_t	*besthomecpu;
	cpu_t	*cp, *cpstart;

	pri_t	bestpri;
	pri_t	cpupri;

	klgrpset_t	done;
	klgrpset_t	cur_set;

	lpl_t		*lpl_iter, *lpl_leaf;
	int		i;

	/*
	 * Scan for a CPU currently running the lowest priority thread.
	 * Cannot get cpu_lock here because it is adaptive.
	 * We do not require lock on CPU list.
	 */
	ASSERT(hint != NULL);
	ASSERT(lpl != NULL);
	ASSERT(lpl->lpl_ncpu > 0);

	/*
	 * First examine local CPUs.  Note that it's possible the hint CPU
	 * passed in is remote to the specified home lgroup.  If our priority
	 * isn't high enough for us to run immediately at home, then examine
	 * CPUs remote to our home lgroup.
	 * We would like to give preference to CPUs closest to "home".
	 * If we can't find a CPU where we'll run at a given level
	 * of locality, we expand our search to include the next level.
	 */
	bestcpu = besthomecpu = NULL;
	klgrpset_clear(done);
	/* start with lpl we were passed */

	lpl_iter = lpl;

	do {

		bestpri = SHRT_MAX;
		klgrpset_clear(cur_set);

		for (i = 0; i < lpl_iter->lpl_nrset; i++) {
			lpl_leaf = lpl_iter->lpl_rset[i];
			if (klgrpset_ismember(done, lpl_leaf->lpl_lgrpid))
				continue;

			klgrpset_add(cur_set, lpl_leaf->lpl_lgrpid);

			if (hint->cpu_lpl == lpl_leaf)
				cp = cpstart = hint;
			else
				cp = cpstart = lpl_leaf->lpl_cpus;

			do {
				if (cp == curcpu)
					cpupri = -1;
				else if (cp == cpu_inmotion)
					cpupri = SHRT_MAX;
				else
					cpupri = cp->cpu_dispatch_pri;
				if (cp->cpu_disp->disp_maxrunpri > cpupri)
					cpupri = cp->cpu_disp->disp_maxrunpri;
				if (cp->cpu_chosen_level > cpupri)
					cpupri = cp->cpu_chosen_level;
				if (cpupri < bestpri) {
					if (CPU_IDLING(cpupri)) {
						ASSERT((cp->cpu_flags &
						    CPU_QUIESCED) == 0);
						return (cp);
					}
					bestcpu = cp;
					bestpri = cpupri;
				}
			} while ((cp = cp->cpu_next_lpl) != cpstart);
		}

		if (bestcpu && (tpri > bestpri)) {
			ASSERT((bestcpu->cpu_flags & CPU_QUIESCED) == 0);
			return (bestcpu);
		}
		if (besthomecpu == NULL)
			besthomecpu = bestcpu;
		/*
		 * Add the lgrps we just considered to the "done" set
		 */
		klgrpset_or(done, cur_set);

	} while ((lpl_iter = lpl_iter->lpl_parent) != NULL);

	/*
	 * The specified priority isn't high enough to run immediately
	 * anywhere, so just return the best CPU from the home lgroup.
	 */
	ASSERT((besthomecpu->cpu_flags & CPU_QUIESCED) == 0);
	return (besthomecpu);
}

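/*
 * The search above, reduced to a sketch (informal, matching the code rather
 * than extending it):
 *
 *	for each lpl level, from the home leaf up toward the root lpl:
 *		for each leaf lgroup at this level not already examined:
 *			walk its CPUs, computing an effective priority from
 *			cpu_dispatch_pri, disp_maxrunpri and cpu_chosen_level;
 *			return the first idling CPU found;
 *		if the best CPU at this level would let us run (tpri > bestpri):
 *			return it;
 *		remember the best CPU seen at the first (home) level;
 *	return the remembered home-level CPU.
 *
 * So a remote CPU is only returned when the thread can actually run there
 * right away; otherwise the thread stays near home even if it must wait.
 */
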
/*
 * This routine provides the generic idle cpu function for all processors.
 * If a processor has some specific code to execute when idle (say, to stop
 * the pipeline and save power) then that routine should be defined in the
 * processor's specific code (module_xx.c) and the global variable idle_cpu
 * set to that function.
 */
static void
generic_idle_cpu(void)
{
}

/*ARGSUSED*/
static void
generic_enq_thread(cpu_t *cpu, int bound)
{
}

/*
 * Select a CPU for this thread to run on.  Choose t->t_cpu unless:
 *	- t->t_cpu is not in this thread's assigned lgrp
 *	- the time since the thread last came off t->t_cpu exceeds the
 *	  rechoose time for this cpu (ignore this if t is curthread in
 *	  which case it's on CPU and t->t_disp_time is inaccurate)
 *	- t->t_cpu is presently the target of an offline or partition move
 *	  request
 */
static cpu_t *
cpu_choose(kthread_t *t, pri_t tpri)
{
	ASSERT(tpri < kpqpri);

	if ((((lbolt - t->t_disp_time) > rechoose_interval) &&
	    t != curthread) || t->t_cpu == cpu_inmotion) {
		return (disp_lowpri_cpu(t->t_cpu, t->t_lpl, tpri, NULL));
	}

	/*
	 * Take a trip through disp_lowpri_cpu() if the thread was
	 * running outside its home lgroup
	 */
	if (!klgrpset_ismember(t->t_lpl->lpl_lgrp->lgrp_set[LGRP_RSRC_CPU],
	    t->t_cpu->cpu_lpl->lpl_lgrpid)) {
		return (disp_lowpri_cpu(t->t_cpu, t->t_lpl, tpri,
		    (t == curthread) ? t->t_cpu : NULL));
	}
	return (t->t_cpu);
}
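
/*
 * For illustration (the tick counts are hypothetical, not defaults defined
 * in this file): with a rechoose_interval of 3 ticks, a thread that last ran
 * on t->t_cpu 2 ticks ago keeps t->t_cpu, provided that CPU is still within
 * the home lgroup's CPU resources and not in motion; a thread that has been
 * off-CPU for 10 ticks instead gets a fresh disp_lowpri_cpu() search.
 * curthread always skips the staleness test because its t_disp_time is not
 * meaningful while it is still running on a CPU.
 */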