/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/pcb.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/var.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/resource.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/kmem.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/poll.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <c2/audit.h>
#include <sys/tnf.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>

struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
static kthread_t *allthreads = &t0;	/* circular list of all threads */

static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t	*thread_deathrow;	/* circular list of reapable threads */
kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
kmutex_t	reaplock;		/* protects lwp and thread deathrows */
kmutex_t	thread_free_lock;	/* protects clock from reaper */
int	thread_reapcnt = 0;		/* number of threads on deathrow */
int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
int	reaplimit = 16;			/* delay reaping until reaplimit */

extern int nthread;

id_t	syscid;				/* system scheduling class ID */
void	*segkp_thread;			/* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;
static kt_did_t next_t_id = 1;

/*
 * Min/Max stack sizes for stack size parameters
 */
#define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
#define	MIN_STKSIZE	DEFAULTSTKSZ

/*
 * default_stksize overrides lwp_default_stksize if it is set.
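 * Both are validated in thread_init(): a value that is not page-aligned
 * or that lies outside [MIN_STKSIZE, MAX_STKSIZE] is replaced with the
 * default, with a warning.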
 */
int	default_stksize;
int	lwp_default_stksize;

static zone_key_t zone_thread_key;

/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (turnstile_t));
	return (0);
}

/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
	turnstile_t *ts = buf;

	ASSERT(ts->ts_free == NULL);
	ASSERT(ts->ts_waiters == 0);
	ASSERT(ts->ts_inheritor == NULL);
	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}

void
thread_init(void)
{
	kthread_t *tp;
	extern char sys_name[];
	extern void idle();
	struct cpu *cpu = CPU;

	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));

#if defined(__i386) || defined(__amd64)
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * "struct _klwp" includes a "struct pcb", which includes a
	 * "struct fpu", which needs to be 16-byte aligned on amd64
	 * (and even on i386 for fxsave/fxrstor).
	 */
	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    16, NULL, NULL, NULL, NULL, NULL, 0);
#else
	/*
	 * Allocate thread structures from static_arena.  This prevents
	 * issues where a thread tries to relocate its own thread
	 * structure and touches it after the mapping has been suspended.
	 */
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

	turnstile_cache = kmem_cache_create("turnstile_cache",
	    sizeof (turnstile_t), 0,
	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);

	cred_init();

	rctl_init();
	project_init();
	zone_init();
	task_init();
	pool_init();

	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	/*
	 * Originally, we had two parameters to set default stack
	 * size: one for lwp's (lwp_default_stksize), and one for
	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
	 * Now we have a third parameter that overrides both if it is
	 * set to a legal stack size, called default_stksize.
	 */

	if (default_stksize == 0) {
		default_stksize = DEFAULTSTKSZ;
	} else if (default_stksize % PAGESIZE != 0 ||
	    default_stksize > MAX_STKSIZE ||
	    default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    (int)DEFAULTSTKSZ);
		default_stksize = DEFAULTSTKSZ;
	} else {
		lwp_default_stksize = default_stksize;
	}

	if (lwp_default_stksize == 0) {
		lwp_default_stksize = default_stksize;
	} else if (lwp_default_stksize % PAGESIZE != 0 ||
	    lwp_default_stksize > MAX_STKSIZE ||
	    lwp_default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    default_stksize);
Using %d", 217*7c478bd9Sstevel@tonic-gate default_stksize); 218*7c478bd9Sstevel@tonic-gate lwp_default_stksize = default_stksize; 219*7c478bd9Sstevel@tonic-gate } 220*7c478bd9Sstevel@tonic-gate 221*7c478bd9Sstevel@tonic-gate segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz, 222*7c478bd9Sstevel@tonic-gate lwp_default_stksize, 223*7c478bd9Sstevel@tonic-gate (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED)); 224*7c478bd9Sstevel@tonic-gate 225*7c478bd9Sstevel@tonic-gate segkp_thread = segkp_cache_init(segkp, t_cache_sz, 226*7c478bd9Sstevel@tonic-gate default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON); 227*7c478bd9Sstevel@tonic-gate 228*7c478bd9Sstevel@tonic-gate (void) getcid(sys_name, &syscid); 229*7c478bd9Sstevel@tonic-gate curthread->t_cid = syscid; /* current thread is t0 */ 230*7c478bd9Sstevel@tonic-gate 231*7c478bd9Sstevel@tonic-gate /* 232*7c478bd9Sstevel@tonic-gate * Set up the first CPU's idle thread. 233*7c478bd9Sstevel@tonic-gate * It runs whenever the CPU has nothing worthwhile to do. 234*7c478bd9Sstevel@tonic-gate */ 235*7c478bd9Sstevel@tonic-gate tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1); 236*7c478bd9Sstevel@tonic-gate cpu->cpu_idle_thread = tp; 237*7c478bd9Sstevel@tonic-gate tp->t_preempt = 1; 238*7c478bd9Sstevel@tonic-gate tp->t_disp_queue = cpu->cpu_disp; 239*7c478bd9Sstevel@tonic-gate ASSERT(tp->t_disp_queue != NULL); 240*7c478bd9Sstevel@tonic-gate tp->t_bound_cpu = cpu; 241*7c478bd9Sstevel@tonic-gate tp->t_affinitycnt = 1; 242*7c478bd9Sstevel@tonic-gate 243*7c478bd9Sstevel@tonic-gate /* 244*7c478bd9Sstevel@tonic-gate * Registering a thread in the callback table is usually 245*7c478bd9Sstevel@tonic-gate * done in the initialization code of the thread. In this 246*7c478bd9Sstevel@tonic-gate * case, we do it right after thread creation to avoid 247*7c478bd9Sstevel@tonic-gate * blocking idle thread while registering itself. It also 248*7c478bd9Sstevel@tonic-gate * avoids the possibility of reregistration in case a CPU 249*7c478bd9Sstevel@tonic-gate * restarts its idle thread. 250*7c478bd9Sstevel@tonic-gate */ 251*7c478bd9Sstevel@tonic-gate CALLB_CPR_INIT_SAFE(tp, "idle"); 252*7c478bd9Sstevel@tonic-gate 253*7c478bd9Sstevel@tonic-gate /* 254*7c478bd9Sstevel@tonic-gate * Finish initializing the kernel memory allocator now that 255*7c478bd9Sstevel@tonic-gate * thread_create() is available. 256*7c478bd9Sstevel@tonic-gate */ 257*7c478bd9Sstevel@tonic-gate kmem_thread_init(); 258*7c478bd9Sstevel@tonic-gate 259*7c478bd9Sstevel@tonic-gate if (boothowto & RB_DEBUG) 260*7c478bd9Sstevel@tonic-gate kdi_dvec_thravail(); 261*7c478bd9Sstevel@tonic-gate } 262*7c478bd9Sstevel@tonic-gate 263*7c478bd9Sstevel@tonic-gate /* 264*7c478bd9Sstevel@tonic-gate * Create a thread. 265*7c478bd9Sstevel@tonic-gate * 266*7c478bd9Sstevel@tonic-gate * thread_create() blocks for memory if necessary. It never fails. 267*7c478bd9Sstevel@tonic-gate * 268*7c478bd9Sstevel@tonic-gate * If stk is NULL, the thread is created at the base of the stack 269*7c478bd9Sstevel@tonic-gate * and cannot be swapped. 
 */
kthread_t *
thread_create(
	caddr_t	stk,
	size_t	stksize,
	void	(*proc)(),
	void	*arg,
	size_t	len,
	proc_t	*pp,
	int	state,
	pri_t	pri)
{
	kthread_t *t;
	extern struct classfuncs sys_classfuncs;
	turnstile_t *ts;
#if defined(__ia64)
	size_t regstksize;
#endif

	/*
	 * Every thread keeps a turnstile around in case it needs to block.
	 * The only reason the turnstile is not simply part of the thread
	 * structure is that we may have to break the association whenever
	 * more than one thread blocks on a given synchronization object.
	 * From a memory-management standpoint, turnstiles are like the
	 * "attached mblks" that hang off dblks in the streams allocator.
	 */
	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	if (stk == NULL) {
		/*
		 * alloc both thread and stack in segkp chunk
		 */

		if (stksize < default_stksize)
			stksize = default_stksize;

		if (stksize == default_stksize) {
			stk = (caddr_t)segkp_cache_get(segkp_thread);
		} else {
			stksize = roundup(stksize, PAGESIZE);
			stk = (caddr_t)segkp_get(segkp, stksize,
			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
		}

		ASSERT(stk != NULL);

		/*
		 * The machine-dependent mutex code may require that
		 * thread pointers (since they may be used for mutex owner
		 * fields) have certain alignment requirements.
		 * PTR24_ALIGN is the size of the alignment quanta.
		 * XXX - assumes stack grows toward low addresses.
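		 * The kthread_t is placed at the high end of the chunk
		 * so the stack can grow down beneath it.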
		 */
		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
			cmn_err(CE_PANIC, "thread_create: proposed stack size"
			    " too small to hold thread.");
#ifdef STACK_GROWTH_DOWN
#if defined(__ia64)
		/* "stksize / 2" may need to be adjusted */
		stksize = stksize / 2;	/* needs to match below */
		regstksize = stksize;
#endif
		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
		stksize &= -PTR24_ALIGN;	/* make thread aligned */
		t = (kthread_t *)(stk + stksize);
		bzero(t, sizeof (kthread_t));
#ifdef C2_AUDIT
		if (audit_active)
			audit_thread_create(t);
#endif
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#if defined(__ia64)
		t->t_regstk = stk + regstksize;
		t->t_stksize = regstksize * 2;	/* needs to match above */
#endif
#else	/* stack grows to larger addresses */
		stksize -= SA(sizeof (kthread_t));
		t = (kthread_t *)(stk);
		bzero(t, sizeof (kthread_t));
		t->t_stk = stk + sizeof (kthread_t);
		t->t_stkbase = stk + stksize + sizeof (kthread_t);
#endif	/* STACK_GROWTH_DOWN */
		t->t_flag |= T_TALLOCSTK;
		t->t_swap = stk;
	} else {
		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
		bzero(t, sizeof (kthread_t));
		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
#ifdef C2_AUDIT
		if (audit_active)
			audit_thread_create(t);
#endif
		/*
		 * Initialize t_stk to the kernel stack pointer to use
		 * upon entry to the kernel
		 */
#ifdef STACK_GROWTH_DOWN
#if defined(__ia64)
		/* "stksize / 2" may need to be adjusted */
		t->t_stk = stk + (stksize / 2);	/* grows down */
		t->t_regstk = t->t_stk;		/* grows up from same place */
		t->t_stkbase = stk;
		t->t_stksize = stksize;
#else
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#endif
#else
		t->t_stk = stk;			/* 3b2-like */
		t->t_stkbase = stk + stksize;
#endif	/* STACK_GROWTH_DOWN */
	}

	/* set default stack flag */
	if (stksize == lwp_default_stksize)
		t->t_flag |= T_DFLTSTK;

	t->t_ts = ts;

	/*
	 * p_cred could be NULL if thread_create() is called before
	 * cred_init() is called in main.
	 */
	mutex_enter(&pp->p_crlock);
	if (pp->p_cred)
		crhold(t->t_cred = pp->p_cred);
	mutex_exit(&pp->p_crlock);
	t->t_start = gethrestime_sec();
	t->t_startpc = proc;
	t->t_procp = pp;
	t->t_clfuncs = &sys_classfuncs.thread;
	t->t_cid = syscid;
	t->t_pri = pri;
	t->t_stime = lbolt;
	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t->t_bind_cpu = PBIND_NONE;
	t->t_bind_pset = PS_NONE;
	t->t_plockp = &pp->p_lock;
	t->t_copyops = NULL;
	t->t_taskq = NULL;
	t->t_anttime = 0;
	t->t_hatdepth = 0;

	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */

	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
#ifndef NPROBE
	/* Kernel probe */
	tnf_thread_create(t);
#endif /* NPROBE */
	LOCK_INIT_CLEAR(&t->t_lock);

	/*
	 * Callers who give us a NULL proc must do their own
	 * stack initialization.  e.g. lwp_create()
	 */
	if (proc != NULL) {
		t->t_stk = thread_stk_init(t->t_stk);
		thread_load(t, proc, arg, len);
	}

	/*
	 * Put a hold on project0.  If this thread is actually in a
	 * different project, then t_proj will be changed later in
	 * lwp_create().  All kernel-only threads must be in project 0.
	 */
	t->t_proj = project_hold(proj0p);

	lgrp_affinity_init(&t->t_lgrp_affinity);

	mutex_enter(&pidlock);
	nthread++;
	t->t_did = next_t_id++;
	t->t_prev = curthread->t_prev;
	t->t_next = curthread;

	/*
	 * Add the thread to the list of all threads, and initialize
	 * its t_cpu pointer.  We need to block preemption since
	 * cpu_offline walks the thread list looking for threads
	 * with t_cpu pointing to the CPU being offlined.  We want
	 * to make sure that the list is consistent and that if t_cpu
	 * is set, the thread is on the list.
	 */
	kpreempt_disable();
	curthread->t_prev->t_next = t;
	curthread->t_prev = t;

	/*
	 * Threads should never have a NULL t_cpu pointer so assign it
	 * here.  If the thread is being created with state TS_RUN a
	 * better CPU may be chosen when it is placed on the run queue.
	 *
	 * We need to keep kernel preemption disabled when setting all
	 * three fields to keep them in sync.  Also, always create in
	 * the default partition since that's where kernel threads go
	 * (if this isn't a kernel thread, t_cpupart will be changed
	 * in lwp_create before setting the thread runnable).
	 */
	t->t_cpupart = &cp_default;

	/*
	 * For now, affiliate this thread with the root lgroup.
	 * Since the kernel does not (presently) allocate its memory
	 * in a locality aware fashion, the root is an appropriate home.
	 * If this thread is later associated with an lwp, it will have
	 * its lgroup re-assigned at that time.
	 */
	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

	/*
	 * Inherit the current cpu.  If this cpu isn't part of the chosen
	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
	 * is ready to run.
	 */
	if (CPU->cpu_part == &cp_default)
		t->t_cpu = CPU;
	else
		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
		    t->t_pri, NULL);

	t->t_disp_queue = t->t_cpu->cpu_disp;
	kpreempt_enable();

	/*
	 * Initialize thread state and the dispatcher lock pointer.
	 * Need to hold onto pidlock to block allthreads walkers until
	 * the state is set.
	 */
	switch (state) {
	case TS_RUN:
		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
		CL_SETRUN(t);
		thread_unlock(t);
		break;

	case TS_ONPROC:
		THREAD_ONPROC(t, t->t_cpu);
		break;

	case TS_FREE:
		/*
		 * Free state will be used for intr threads.
		 * The interrupt routine must set the thread dispatcher
		 * lock pointer (t_lockp) if starting on a CPU
		 * other than the current one.
		 */
		THREAD_FREEINTR(t, CPU);
		break;

	case TS_STOPPED:
		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
		break;

	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
	}
	mutex_exit(&pidlock);
	return (t);
}

/*
 * Move thread to project0 and take care of project reference counters.
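 * thread_exit(), for example, calls this before putting the thread on
 * deathrow; the switch to proj0p is made under thread_lock().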
 */
void
thread_rele(kthread_t *t)
{
	kproject_t *kpj;

	thread_lock(t);

	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
	kpj = ttoproj(t);
	t->t_proj = proj0p;

	thread_unlock(t);

	if (kpj != proj0p) {
		project_rele(kpj);
		(void) project_hold(proj0p);
	}
}


void	(*ip_cleanup_func)(void);

void
thread_exit()
{
	kthread_t *t = curthread;

	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");

	if (ip_cleanup_func != NULL)
		(*ip_cleanup_func)();

	tsd_exit();		/* Clean up this thread's TSD */

	kcpc_passivate();	/* clean up performance counter state */

	/*
	 * No kernel thread should have called poll() without arranging
	 * for pollcleanup() to be called here.
	 */
	ASSERT(t->t_pollstate == NULL);
	ASSERT(t->t_schedctl == NULL);
	if (t->t_door)
		door_slam();	/* in case thread did an upcall */

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	thread_rele(t);
	t->t_preempt++;

	/*
	 * remove thread from the all threads list so that
	 * death-row can use the same pointers.
	 */
	mutex_enter(&pidlock);
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	ASSERT(allthreads != t);	/* t0 never exits */
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	if (t->t_ctx != NULL)
		exitctx(t);

	t->t_state = TS_ZOMB;	/* set zombie thread */

	swtch_from_zombie();	/* give up the CPU */
	/* NOTREACHED */
}

/*
 * Check to see if the specified thread is active (defined as being on
 * the thread list).  This is certainly a slow way to do this; if there's
 * ever a reason to speed it up, we could maintain a hash table of active
 * threads indexed by their t_did.
 */
static kthread_t *
did_to_thread(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&pidlock));
	for (t = curthread->t_next; t != curthread; t = t->t_next) {
		if (t->t_did == tid)
			break;
	}
	if (t->t_did == tid)
		return (t);
	else
		return (NULL);
}

/*
 * Wait for specified thread to exit.  Returns immediately if the thread
 * could not be found, meaning that it has either already exited or never
 * existed.
 */
void
thread_join(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(tid != curthread->t_did);
	ASSERT(tid != t0.t_did);

	mutex_enter(&pidlock);
	/*
	 * Make sure we check that the thread is on the thread list
	 * before blocking on it; otherwise we could end up blocking on
	 * a cv that's already been freed.  In other words, don't cache
	 * the thread pointer across calls to cv_wait.
	 *
	 * The choice of loop invariant means that whenever a thread
	 * is taken off the allthreads list, a cv_broadcast must be
	 * performed on that thread's t_joincv to wake up any waiters.
	 * The broadcast doesn't have to happen right away, but it
	 * shouldn't be postponed indefinitely (e.g., by doing it in
	 * thread_free which may only be executed when the deathrow
	 * queue is processed).
	 */
	while (t = did_to_thread(tid))
		cv_wait(&t->t_joincv, &pidlock);
	mutex_exit(&pidlock);
}

void
thread_free(kthread_t *t)
{
	ASSERT(t != &t0 && t->t_state == TS_FREE);
	ASSERT(t->t_door == NULL);
	ASSERT(t->t_schedctl == NULL);
	ASSERT(t->t_pollstate == NULL);

	t->t_pri = 0;
	t->t_pc = 0;
	t->t_sp = 0;
	t->t_wchan0 = NULL;
	t->t_wchan = NULL;
	if (t->t_cred != NULL) {
		crfree(t->t_cred);
		t->t_cred = 0;
	}
	if (t->t_pdmsg) {
		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
		t->t_pdmsg = NULL;
	}
#ifdef C2_AUDIT
	if (audit_active)
		audit_thread_free(t);
#endif
#ifndef NPROBE
	if (t->t_tnf_tpdp)
		tnf_thread_free(t);
#endif /* NPROBE */
	if (t->t_cldata) {
		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
	}
	if (t->t_rprof != NULL) {
		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
		t->t_rprof = NULL;
	}
	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
	if (t->t_lwp)
		lwp_freeregs(t->t_lwp, 0);
	if (t->t_ctx)
		freectx(t, 0);
	t->t_stk = NULL;
	if (t->t_lwp)
		lwp_stk_fini(t->t_lwp);
	lock_clear(&t->t_lock);

	if (t->t_ts->ts_waiters > 0)
		panic("thread_free: turnstile still active");

	kmem_cache_free(turnstile_cache, t->t_ts);

	free_afd(&t->t_activefd);

	/*
	 * Barrier for clock thread.  The clock holds this lock to
	 * keep the thread from going away while it's looking at it.
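	 * Taking and dropping the lock here guarantees that any such
	 * examination has finished before the thread is freed.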
	 */
	mutex_enter(&thread_free_lock);
	mutex_exit(&thread_free_lock);

	ASSERT(ttoproj(t) == proj0p);
	project_rele(ttoproj(t));

	lgrp_affinity_free(&t->t_lgrp_affinity);

	/*
	 * Free thread struct and its stack.
	 */
	if (t->t_flag & T_TALLOCSTK) {
		/* thread struct is embedded in stack */
		segkp_release(segkp, t->t_swap);
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
	} else {
		if (t->t_swap) {
			segkp_release(segkp, t->t_swap);
			t->t_swap = NULL;
		}
		if (t->t_lwp) {
			kmem_cache_free(lwp_cache, t->t_lwp);
			t->t_lwp = NULL;
		}
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
		kmem_cache_free(thread_cache, t);
	}
}

/*
 * Removes threads associated with the given zone from a deathrow queue.
 * tp is a pointer to the head of the deathrow queue, and countp is a
 * pointer to the current deathrow count.  Returns a linked list of
 * threads removed from the list.
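 * The returned list is linked through t_forw, ready to be handed to
 * thread_reap_list().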
 */
static kthread_t *
thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
{
	kthread_t *tmp, *list = NULL;
	cred_t *cr;

	ASSERT(MUTEX_HELD(&reaplock));
	while (*tp != NULL) {
		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
			tmp = *tp;
			*tp = tmp->t_forw;
			tmp->t_forw = list;
			list = tmp;
			(*countp)--;
		} else {
			tp = &(*tp)->t_forw;
		}
	}
	return (list);
}

static void
thread_reap_list(kthread_t *t)
{
	kthread_t *next;

	while (t != NULL) {
		next = t->t_forw;
		thread_free(t);
		t = next;
	}
}

/* ARGSUSED */
static void
thread_zone_destroy(zoneid_t zoneid, void *unused)
{
	kthread_t *t, *l;

	mutex_enter(&reaplock);
	/*
	 * Pull threads and lwps associated with zone off deathrow lists.
	 */
	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
	mutex_exit(&reaplock);

	/*
	 * Reap threads
	 */
	thread_reap_list(t);

	/*
	 * Reap lwps
	 */
	thread_reap_list(l);
}

/*
 * cleanup zombie threads that are on deathrow.
 */
void
thread_reaper()
{
	kthread_t *t, *l;
	callb_cpr_t cprinfo;

	/*
	 * Register callback to clean up threads when zone is destroyed.
	 */
	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
	for (;;) {
		mutex_enter(&reaplock);
		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&reaper_cv, &reaplock);
			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
		}
		t = thread_deathrow;
		l = lwp_deathrow;
		thread_deathrow = NULL;
		lwp_deathrow = NULL;
		thread_reapcnt = 0;
		lwp_reapcnt = 0;
		mutex_exit(&reaplock);

		/*
		 * Reap threads
		 */
		thread_reap_list(t);

		/*
		 * Reap lwps
		 */
		thread_reap_list(l);
	}
}

/*
 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is reapable.
 * This is called from the idle thread so it must not block (just spin).
 */
void
reapq_add(kthread_t *t)
{
	mutex_enter(&reaplock);

	/*
	 * lwp_deathrow contains only threads with lwp linkage
	 * that are of the default stacksize.  Anything else goes
	 * on thread_deathrow.
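	 * Keeping the lwp and its default-sized stack intact is what
	 * lets those pieces be reused cheaply when a new lwp is needed.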
	 */
	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
		t->t_forw = lwp_deathrow;
		lwp_deathrow = t;
		lwp_reapcnt++;
	} else {
		t->t_forw = thread_deathrow;
		thread_deathrow = t;
		thread_reapcnt++;
	}
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
	t->t_state = TS_FREE;
	lock_clear(&t->t_lock);
	mutex_exit(&reaplock);
}

/*
 * Install a device context for the current thread
 */
void
installctx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx;

	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
	ctx->save_op = save;
	ctx->restore_op = restore;
	ctx->fork_op = fork;
	ctx->lwp_create_op = lwp_create;
	ctx->exit_op = exit;
	ctx->free_op = free;
	ctx->arg = arg;
	ctx->next = t->t_ctx;
	t->t_ctx = ctx;
}

/*
 * Remove a device context from the current thread
 * (Or allow the agent thread to remove device context from another
 * thread in the same, stopped, process)
 */
int
removectx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx, *prev_ctx;

	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	prev_ctx = NULL;
	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
		if (ctx->save_op == save && ctx->restore_op == restore &&
		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
		    ctx->exit_op == exit && ctx->free_op == free &&
		    ctx->arg == arg) {
			if (prev_ctx)
				prev_ctx->next = ctx->next;
			else
				t->t_ctx = ctx->next;
			if (ctx->free_op != NULL)
				(ctx->free_op)(ctx->arg, 0);
			kmem_free(ctx, sizeof (struct ctxop));
			return (1);
		}
		prev_ctx = ctx;
	}
	return (0);
}
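
/*
 * savectx() and restorectx() walk the thread's list of context
 * operations and invoke each non-NULL save_op/restore_op with its
 * registered arg; both assert that they are applied to curthread.
 */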
void
savectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->save_op != NULL)
			(ctx->save_op)(ctx->arg);
}

void
restorectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->restore_op != NULL)
			(ctx->restore_op)(ctx->arg);
}

void
forkctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->fork_op != NULL)
			(ctx->fork_op)(t, ct);
}

/*
 * Note that this operator is only invoked via the _lwp_create
 * system call.  The system may have other reasons to create lwps
 * e.g. the agent lwp or the doors unreferenced lwp.
 */
void
lwp_createctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->lwp_create_op != NULL)
			(ctx->lwp_create_op)(t, ct);
}

/*
 * exitctx is called from thread_exit() and lwp_exit() to perform any actions
 * needed when the thread/LWP leaves the processor for the last time. This
 * routine is not intended to deal with freeing memory; freectx() is used for
 * that purpose during thread_free(). This routine is provided to allow for
 * clean-up that can't wait until thread_free().
 */
void
exitctx(kthread_t *t)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->exit_op != NULL)
			(ctx->exit_op)(t);
}

/*
 * freectx is called from thread_free() and exec() to get
 * rid of old device context.
 */
void
freectx(kthread_t *t, int isexec)
{
	struct ctxop *ctx;

	while ((ctx = t->t_ctx) != NULL) {
		t->t_ctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, isexec);
		kmem_free(ctx, sizeof (struct ctxop));
	}
}

/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	if (t->t_state == TS_SLEEP) {
		/*
		 * Take off sleep queue.
		 */
		SOBJ_UNSLEEP(t->t_sobj_ops, t);
	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
		/*
		 * Already on dispatcher queue.
                return;
        } else if (t->t_state == TS_STOPPED) {
                /*
                 * All of the sending of SIGCONT (TC_XSTART) and /proc
                 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
                 * requested that the thread be run.
                 * Just calling setrun() is not sufficient to set a stopped
                 * thread running.  TP_TXSTART is always set if the thread
                 * is not stopped by a jobcontrol stop signal.
                 * TP_TPSTART is always set if /proc is not controlling it.
                 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
                 * The thread won't be stopped unless one of these
                 * three mechanisms did it.
                 *
                 * These flags must be set before calling setrun_locked(t).
                 * They can't be passed as arguments because the streams
                 * code calls setrun() indirectly and the mechanism for
                 * doing so admits only one argument.  Note that the
                 * thread must be locked in order to change t_schedflags.
                 */
                if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
                        return;
                /*
                 * Process is no longer stopped (a thread is running).
                 */
                t->t_whystop = 0;
                t->t_whatstop = 0;
                /*
                 * Strictly speaking, we do not have to clear these
                 * flags here; they are cleared on entry to stop().
                 * However, they are confusing when doing kernel
                 * debugging or when they are revealed by ps(1).
                 */
                t->t_schedflag &= ~TS_ALLSTART;
                THREAD_TRANSITION(t);	/* drop stopped-thread lock */
                ASSERT(t->t_lockp == &transition_lock);
                ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
                /*
                 * Let the class put the process on the dispatcher queue.
                 */
                CL_SETRUN(t);
        }
}

void
setrun(kthread_t *t)
{
        thread_lock(t);
        setrun_locked(t);
        thread_unlock(t);
}
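/*
 * As the block comment in setrun_locked() explains, each stop mechanism
 * must set its own start flag in t_schedflag before the thread can run
 * again.  A sketch of a continue path restarting a thread it had
 * stopped; the TS_CSTART flag name is an assumption based on the
 * TS_ALLSTART description above:
 *
 *	thread_lock(t);
 *	t->t_schedflag |= TS_CSTART;
 *	setrun_locked(t);
 *	thread_unlock(t);
 */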
/*
 * Unpin an interrupted thread.
 * When an interrupt occurs, the interrupt is handled on the stack
 * of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 * When swtch() is switching away from an interrupt thread because it
 * blocked or was preempted, this routine is called to complete the
 * saving of the interrupted thread state, and returns the interrupted
 * thread pointer so it may be resumed.
 *
 * Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
        kthread_t	*t = curthread;	/* current thread */
        kthread_t	*itp;		/* interrupted thread */
        int		i;		/* interrupt level */
        extern int	intr_passivate();

        ASSERT(t->t_intr != NULL);

        itp = t->t_intr;		/* interrupted thread */
        t->t_intr = NULL;		/* clear interrupt ptr */

        /*
         * Get state from interrupt thread for the one
         * it interrupted.
         */
        i = intr_passivate(t, itp);

        TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
            "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
            i, t, t, itp, itp);

        /*
         * Dissociate the current thread from the interrupted thread's LWP.
         */
        t->t_lwp = NULL;

        /*
         * Interrupt handlers above the level that spinlocks block must
         * not block.
         */
#if DEBUG
        if (i < 0 || i > LOCK_LEVEL)
                cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
#endif

        /*
         * Compute the CPU's base interrupt level based on the active
         * interrupts.
         */
        ASSERT(CPU->cpu_intr_actv & (1 << i));
        set_base_spl();

        return (itp);
}

/*
 * Create and initialize an interrupt thread.
 *	Called at spl7() or better.
 */
void
thread_create_intr(struct cpu *cp)
{
        kthread_t *tp;

        tp = thread_create(NULL, 0,
            (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

        /*
         * Set the thread in the TS_FREE state.  The state will change
         * to TS_ONPROC only while the interrupt is active.  Think of these
         * as being on a private free list for the CPU.  Being TS_FREE keeps
         * inactive interrupt threads out of debugger thread lists.
         *
         * We cannot call thread_create with TS_FREE because of the current
         * checks there for ONPROC.  Fix this when thread_create takes flags.
         */
        THREAD_FREEINTR(tp, cp);

        /*
         * Nobody should ever reference the credentials of an interrupt
         * thread so make it NULL to catch any such references.
         */
        tp->t_cred = NULL;
        tp->t_flag |= T_INTR_THREAD;
        tp->t_cpu = cp;
        tp->t_bound_cpu = cp;
        tp->t_disp_queue = cp->cpu_disp;
        tp->t_affinitycnt = 1;
        tp->t_preempt = 1;

        /*
         * Don't make a user-requested binding on this thread so that
         * the processor can be offlined.
         */
        tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
        tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
        tp->t_stk -= STACK_ALIGN;
        *(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

        /*
         * Link onto CPU's interrupt pool.
         */
        tp->t_link = cp->cpu_intr_thread;
        cp->cpu_intr_thread = tp;
}
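/*
 * A sketch of how a CPU bring-up path might pre-populate this pool;
 * the loop bound nintr is illustrative only, as the real allocation
 * policy belongs to the CPU startup code:
 *
 *	int i;
 *
 *	for (i = 0; i < nintr; i++)
 *		thread_create_intr(cp);
 */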
/*
 * TSD -- THREAD SPECIFIC DATA
 */
static kmutex_t		tsd_mutex;	 /* linked list spin lock */
static uint_t		tsd_nkeys;	 /* size of destructor array */
/* per-key destructor funcs */
static void		(**tsd_destructor)(void *);
/* list of tsd_thread's */
static struct tsd_thread	*tsd_list;

/*
 * Default destructor
 *	Needed because NULL destructor means that the key is unused
 */
/* ARGSUSED */
void
tsd_defaultdestructor(void *value)
{}

/*
 * Create a key (index into per thread array)
 *	Locks out tsd_create, tsd_destroy, and tsd_exit
 *	May allocate memory with lock held
 */
void
tsd_create(uint_t *keyp, void (*destructor)(void *))
{
        int	i;
        uint_t	nkeys;

        /*
         * if key is allocated, do nothing
         */
        mutex_enter(&tsd_mutex);
        if (*keyp) {
                mutex_exit(&tsd_mutex);
                return;
        }
        /*
         * find an unused key
         */
        if (destructor == NULL)
                destructor = tsd_defaultdestructor;

        for (i = 0; i < tsd_nkeys; ++i)
                if (tsd_destructor[i] == NULL)
                        break;

        /*
         * if no unused keys, increase the size of the destructor array
         */
        if (i == tsd_nkeys) {
                if ((nkeys = (tsd_nkeys << 1)) == 0)
                        nkeys = 1;
                tsd_destructor =
                    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
                    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
                    (size_t)(nkeys * sizeof (void (*)(void *))));
                tsd_nkeys = nkeys;
        }

        /*
         * allocate the next available unused key
         */
        tsd_destructor[i] = destructor;
        *keyp = i + 1;
        mutex_exit(&tsd_mutex);
}

/*
 * Destroy a key -- this is for unloadable modules
 *
 * Assumes that the caller is preventing tsd_set and tsd_get
 *	Locks out tsd_create, tsd_destroy, and tsd_exit
 *	May free memory with lock held
 */
void
tsd_destroy(uint_t *keyp)
{
        uint_t key;
        struct tsd_thread *tsd;

        /*
         * protect the key namespace and our destructor lists
         */
        mutex_enter(&tsd_mutex);
        key = *keyp;
        *keyp = 0;

        ASSERT(key <= tsd_nkeys);

        /*
         * if the key is valid
         */
        if (key != 0) {
                uint_t k = key - 1;
                /*
                 * for every thread with TSD, call key's destructor
                 */
                for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
                        /*
                         * no TSD for key in this thread
                         */
                        if (key > tsd->ts_nkeys)
                                continue;
                        /*
                         * call destructor for key
                         */
                        if (tsd->ts_value[k] && tsd_destructor[k])
                                (*tsd_destructor[k])(tsd->ts_value[k]);
                        /*
                         * reset value for key
                         */
                        tsd->ts_value[k] = NULL;
                }
                /*
                 * actually free the key (NULL destructor == unused)
                 */
                tsd_destructor[k] = NULL;
        }

        mutex_exit(&tsd_mutex);
}

/*
 * Quickly return the per thread value that was stored with the specified key
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 */
void *
tsd_get(uint_t key)
{
        return (tsd_agent_get(curthread, key));
}
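/*
 * Taken together, the interfaces above give a module a simple TSD
 * lifecycle; the my_* names below are illustrative only:
 *
 *	static uint_t my_key;
 *
 *	tsd_create(&my_key, my_destroy);	(once, e.g. at module load)
 *	(void) tsd_set(my_key, my_data);	(in any thread)
 *	my_data = tsd_get(my_key);		(in the same thread)
 *	tsd_destroy(&my_key);			(once, at module unload)
 */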
/*
 * Set a per thread value indexed with the specified key
 */
int
tsd_set(uint_t key, void *value)
{
        return (tsd_agent_set(curthread, key, value));
}

/*
 * Like tsd_get(), except that the agent lwp can get the tsd of
 * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or syslwp is creating a new lwp.
 */
void *
tsd_agent_get(kthread_t *t, uint_t key)
{
        struct tsd_thread *tsd = t->t_tsd;

        ASSERT(t == curthread ||
            ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

        if (key && tsd != NULL && key <= tsd->ts_nkeys)
                return (tsd->ts_value[key - 1]);
        return (NULL);
}

/*
 * Like tsd_set(), except that the agent lwp can set the tsd of
 * another thread in the same process, or syslwp can set the tsd
 * of a thread it's in the middle of creating.
 *
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 * May lock out tsd_destroy (and tsd_create), may allocate memory with
 * lock held
 */
int
tsd_agent_set(kthread_t *t, uint_t key, void *value)
{
        struct tsd_thread *tsd = t->t_tsd;

        ASSERT(t == curthread ||
            ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

        if (key == 0)
                return (EINVAL);
        if (tsd == NULL)
                tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
        if (key <= tsd->ts_nkeys) {
                tsd->ts_value[key - 1] = value;
                return (0);
        }

        ASSERT(key <= tsd_nkeys);

        /*
         * lock out tsd_destroy()
         */
        mutex_enter(&tsd_mutex);
        if (tsd->ts_nkeys == 0) {
                /*
                 * Link onto list of threads with TSD
                 */
                if ((tsd->ts_next = tsd_list) != NULL)
                        tsd_list->ts_prev = tsd;
                tsd_list = tsd;
        }

        /*
         * Allocate thread local storage and set the value for key
         */
        tsd->ts_value = tsd_realloc(tsd->ts_value,
            tsd->ts_nkeys * sizeof (void *),
            key * sizeof (void *));
        tsd->ts_nkeys = key;
        tsd->ts_value[key - 1] = value;
        mutex_exit(&tsd_mutex);

        return (0);
}

/*
 * Return the per thread value that was stored with the specified key
 *	If necessary, create the key and the value
 *	Assumes the caller is protecting *keyp from tsd_destroy
 */
void *
tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
{
        void *value;
        uint_t key = *keyp;
        struct tsd_thread *tsd = curthread->t_tsd;

        if (tsd == NULL)
                tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
        if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
                return (value);
        if (key == 0)
                tsd_create(keyp, destroy);
        (void) tsd_set(*keyp, value = (*allocate)());

        return (value);
}
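/*
 * tsd_getcreate() collapses the create/set/get sequence into one call
 * for lazily initialized per-thread data; a hypothetical use, where
 * my_alloc() returns a freshly initialized value on first access:
 *
 *	my_data = tsd_getcreate(&my_key, my_destroy, my_alloc);
 */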
/*
 * Called from thread_exit() to run the destructor function for each tsd
 *	Locks out tsd_create and tsd_destroy
 *	Assumes that the destructor *DOES NOT* use tsd
 */
void
tsd_exit(void)
{
        int i;
        struct tsd_thread *tsd = curthread->t_tsd;

        if (tsd == NULL)
                return;

        if (tsd->ts_nkeys == 0) {
                kmem_free(tsd, sizeof (*tsd));
                curthread->t_tsd = NULL;
                return;
        }

        /*
         * lock out tsd_create and tsd_destroy, call
         * the destructor, and mark the value as destroyed.
         */
        mutex_enter(&tsd_mutex);

        for (i = 0; i < tsd->ts_nkeys; i++) {
                if (tsd->ts_value[i] && tsd_destructor[i])
                        (*tsd_destructor[i])(tsd->ts_value[i]);
                tsd->ts_value[i] = NULL;
        }

        /*
         * remove from linked list of threads with TSD
         */
        if (tsd->ts_next)
                tsd->ts_next->ts_prev = tsd->ts_prev;
        if (tsd->ts_prev)
                tsd->ts_prev->ts_next = tsd->ts_next;
        if (tsd_list == tsd)
                tsd_list = tsd->ts_next;

        mutex_exit(&tsd_mutex);

        /*
         * free up the TSD
         */
        kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
        kmem_free(tsd, sizeof (struct tsd_thread));
        curthread->t_tsd = NULL;
}

/*
 * realloc
 */
static void *
tsd_realloc(void *old, size_t osize, size_t nsize)
{
        void *new;

        new = kmem_zalloc(nsize, KM_SLEEP);
        if (old) {
                bcopy(old, new, osize);
                kmem_free(old, osize);
        }
        return (new);
}

/*
 * Check to see if an interrupt thread might be active at a given ipl.
 * If so return true.
 * We must be conservative--it is ok to give a false yes, but a false no
 * will cause disaster.  (But if the situation changes after we check it is
 * ok--the caller is trying to ensure that an interrupt routine has been
 * exited).
 * This is used when trying to remove an interrupt handler from an autovector
 * list in avintr.c.
 */
int
intr_active(struct cpu *cp, int level)
{
        if (level <= LOCK_LEVEL)
                return (cp->cpu_thread != cp->cpu_dispthread);
        else
                return (CPU_ON_INTR(cp));
}
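/*
 * The autovector removal path described above can use this roughly as
 * follows, spinning until the handler can no longer be active at the
 * given level (a sketch only; the real loop lives in avintr.c):
 *
 *	while (intr_active(cp, level))
 *		;
 */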
/*
 * Return non-zero if an interrupt is being serviced.
 */
int
servicing_interrupt()
{
        /*
         * Note: single-OR used on purpose to return non-zero if T_INTR_THREAD
         * flag set or CPU_ON_INTR(CPU) is non-zero (indicating high-level
         * interrupt).
         */
        return ((curthread->t_flag & T_INTR_THREAD) | CPU_ON_INTR(CPU));
}
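/*
 * An illustrative use: code reachable from both thread and interrupt
 * context can consult this to choose a non-blocking allocation policy:
 *
 *	int kmflag = servicing_interrupt() ? KM_NOSLEEP : KM_SLEEP;
 *
 *	buf = kmem_alloc(size, kmflag);
 */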
/*
 * Change the dispatch priority of a thread in the system.
 * Used when raising or lowering a thread's priority.
 * (E.g., priority inheritance)
 *
 * Since threads are queued according to their priority, we
 * must check the thread's state to determine whether it
 * is on a queue somewhere.  If it is, we've got to:
 *
 *	o Dequeue the thread.
 *	o Change its effective priority.
 *	o Enqueue the thread.
 *
 * Assumptions: The thread whose priority we wish to change
 * must be locked before we call thread_change_(e)pri().
 * The thread_change_(e)pri() function doesn't drop the thread
 * lock--that must be done by its caller.
 */
void
thread_change_epri(kthread_t *t, pri_t disp_pri)
{
        uint_t	state;

        ASSERT(THREAD_LOCK_HELD(t));

        /*
         * If the inherited priority hasn't actually changed,
         * just return.
         */
        if (t->t_epri == disp_pri)
                return;

        state = t->t_state;

        /*
         * If it's not on a queue, change the priority with
         * impunity.
         */
        if ((state & (TS_SLEEP | TS_RUN)) == 0) {
                t->t_epri = disp_pri;

                if (state == TS_ONPROC) {
                        cpu_t *cp = t->t_disp_queue->disp_cpu;

                        if (t == cp->cpu_dispthread)
                                cp->cpu_dispatch_pri = DISP_PRIO(t);
                }
                return;
        }

        /*
         * It's either on a sleep queue or a run queue.
         */
        if (state == TS_SLEEP) {
                /*
                 * Take the thread out of its sleep queue.
                 * Change the inherited priority.
                 * Re-enqueue the thread.
                 * Each synchronization object exports a function
                 * to do this in an appropriate manner.
                 */
                SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
        } else {
                /*
                 * The thread is on a run queue.
                 * Note: setbackdq() may not put the thread
                 * back on the same run queue where it originally
                 * resided.
                 */
                (void) dispdeq(t);
                t->t_epri = disp_pri;
                setbackdq(t);
        }
}	/* end of thread_change_epri */
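/*
 * A priority-inheritance path (the "E.g." in the block comment above)
 * might boost a lock owner's effective priority with a sketch like the
 * following; the owner/waiter names are illustrative:
 *
 *	thread_lock(owner);
 *	thread_change_epri(owner, DISP_PRIO(waiter));
 *	thread_unlock(owner);
 */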
/*
 * Function: Change the t_pri field of a thread.
 * Side Effects: Adjust the thread ordering on a run queue
 *		 or sleep queue, if necessary.
 * Returns: 1 if the thread was on a run queue, else 0.
 */
int
thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
{
        uint_t	state;
        int	on_rq = 0;

        ASSERT(THREAD_LOCK_HELD(t));

        state = t->t_state;
        THREAD_WILLCHANGE_PRI(t, disp_pri);

        /*
         * If it's not on a queue, change the priority with
         * impunity.
         */
        if ((state & (TS_SLEEP | TS_RUN)) == 0) {
                t->t_pri = disp_pri;

                if (state == TS_ONPROC) {
                        cpu_t *cp = t->t_disp_queue->disp_cpu;

                        if (t == cp->cpu_dispthread)
                                cp->cpu_dispatch_pri = DISP_PRIO(t);
                }
                return (0);
        }

        /*
         * It's either on a sleep queue or a run queue.
         */
        if (state == TS_SLEEP) {
                /*
                 * If the priority has changed, take the thread out of
                 * its sleep queue and change the priority.
                 * Re-enqueue the thread.
                 * Each synchronization object exports a function
                 * to do this in an appropriate manner.
                 */
                if (disp_pri != t->t_pri)
                        SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
        } else {
                /*
                 * The thread is on a run queue.
                 * Note: setbackdq() may not put the thread
                 * back on the same run queue where it originally
                 * resided.
                 *
                 * We still requeue the thread even if the priority
                 * is unchanged to preserve round-robin (and other)
                 * effects between threads of the same priority.
                 */
                on_rq = dispdeq(t);
                ASSERT(on_rq);
                t->t_pri = disp_pri;
                if (front) {
                        setfrontdq(t);
                } else {
                        setbackdq(t);
                }
        }
        return (on_rq);
}
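/*
 * A scheduling class might apply a newly computed dispatch priority as
 * below; newpri and the back-of-queue choice are illustrative:
 *
 *	thread_lock(t);
 *	on_rq = thread_change_pri(t, newpri, 0);
 *	thread_unlock(t);
 */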