/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/pcb.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/var.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/resource.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/kmem.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/poll.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <c2/audit.h>
#include <sys/tnf.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/brand.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <inet/ip.h>
#include <inet/ip_if.h>

struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
static kthread_t *allthreads = &t0;	/* circular list of all threads */

static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t	*thread_deathrow;	/* circular list of reapable threads */
kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
kmutex_t	reaplock;		/* protects lwp and thread deathrows */
kmutex_t	thread_free_lock;	/* protects clock from reaper */
int	thread_reapcnt = 0;		/* number of threads on deathrow */
int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
int	reaplimit = 16;			/* delay reaping until reaplimit */

extern int nthread;

id_t	syscid;				/* system scheduling class ID */
void	*segkp_thread;			/* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;
static kt_did_t next_t_id = 1;

/*
 * Min/Max stack sizes for stack size parameters
 */
#define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
#define	MIN_STKSIZE	DEFAULTSTKSZ

/*
 * default_stksize overrides lwp_default_stksize if it is set.
 */
int	default_stksize;
int	lwp_default_stksize;

static zone_key_t zone_thread_key;

/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

void thread_reaper(void);

/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (turnstile_t));
	return (0);
}

/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
	turnstile_t *ts = buf;

	ASSERT(ts->ts_free == NULL);
	ASSERT(ts->ts_waiters == 0);
	ASSERT(ts->ts_inheritor == NULL);
	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}

void
thread_init(void)
{
	kthread_t *tp;
	extern char sys_name[];
	extern void idle();
	struct cpu *cpu = CPU;

	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));

#if defined(__i386) || defined(__amd64)
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * "struct _klwp" includes a "struct pcb", which includes a
	 * "struct fpu", which needs to be 16-byte aligned on amd64
	 * (and even on i386 for fxsave/fxrstor).
	 */
	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    16, NULL, NULL, NULL, NULL, NULL, 0);
#else
	/*
	 * Allocate thread structures from static_arena.  This prevents
	 * issues where a thread tries to relocate its own thread
	 * structure and touches it after the mapping has been suspended.
	 */
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

	lwp_stk_cache_init();

	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

	turnstile_cache = kmem_cache_create("turnstile_cache",
	    sizeof (turnstile_t), 0,
	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);

	label_init();
	cred_init();

	/*
	 * Initialize various resource management facilities.
	 */
	rctl_init();
	cpucaps_init();
	/*
	 * Zone_init() should be called before project_init() so that
	 * project ID for the first project is initialized correctly.
	 */
	zone_init();
	project_init();
	brand_init();
	task_init();
	tcache_init();
	pool_init();

	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	/*
	 * Originally, we had two parameters to set default stack
	 * size: one for lwp's (lwp_default_stksize), and one for
	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
	 * Now we have a third parameter that overrides both if it is
	 * set to a legal stack size, called default_stksize.
	 */
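
	/*
	 * Illustrative note (not part of the original source): these
	 * defaults are ordinary kernel globals, so they can be tuned
	 * from /etc/system before boot.  A minimal sketch, assuming a
	 * configuration where a 24K stack is legal (page-aligned and
	 * within MIN_STKSIZE..MAX_STKSIZE):
	 *
	 *	* in /etc/system
	 *	set default_stksize = 0x6000
	 *	set lwp_default_stksize = 0x6000
	 *
	 * As the checks below show, an illegal default_stksize falls
	 * back to DEFAULTSTKSZ with a warning, and an illegal
	 * lwp_default_stksize falls back to default_stksize.
	 */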

	if (default_stksize == 0) {
		default_stksize = DEFAULTSTKSZ;
	} else if (default_stksize % PAGESIZE != 0 ||
	    default_stksize > MAX_STKSIZE ||
	    default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    (int)DEFAULTSTKSZ);
		default_stksize = DEFAULTSTKSZ;
	} else {
		lwp_default_stksize = default_stksize;
	}

	if (lwp_default_stksize == 0) {
		lwp_default_stksize = default_stksize;
	} else if (lwp_default_stksize % PAGESIZE != 0 ||
	    lwp_default_stksize > MAX_STKSIZE ||
	    lwp_default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    default_stksize);
		lwp_default_stksize = default_stksize;
	}

	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
	    lwp_default_stksize,
	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));

	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);

	(void) getcid(sys_name, &syscid);
	curthread->t_cid = syscid;	/* current thread is t0 */

	/*
	 * Set up the first CPU's idle thread.
	 * It runs whenever the CPU has nothing worthwhile to do.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
	cpu->cpu_idle_thread = tp;
	tp->t_preempt = 1;
	tp->t_disp_queue = cpu->cpu_disp;
	ASSERT(tp->t_disp_queue != NULL);
	tp->t_bound_cpu = cpu;
	tp->t_affinitycnt = 1;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread.  In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while it registers itself.  It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	/*
	 * Create the thread_reaper daemon.  From this point on, exited
	 * threads will get reaped.
	 */
	(void) thread_create(NULL, 0, (void (*)())thread_reaper,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Finish initializing the kernel memory allocator now that
	 * thread_create() is available.
	 */
	kmem_thread_init();

	if (boothowto & RB_DEBUG)
		kdi_dvec_thravail();
}

/*
 * Create a thread.
 *
 * thread_create() blocks for memory if necessary.  It never fails.
 *
 * If stk is NULL, the thread is created at the base of the stack
 * and cannot be swapped.
 */
kthread_t *
thread_create(
	caddr_t	stk,
	size_t	stksize,
	void	(*proc)(),
	void	*arg,
	size_t	len,
	proc_t	 *pp,
	int	state,
	pri_t	pri)
{
	kthread_t *t;
	extern struct classfuncs sys_classfuncs;
	turnstile_t *ts;

	/*
	 * Every thread keeps a turnstile around in case it needs to block.
	 * The only reason the turnstile is not simply part of the thread
	 * structure is that we may have to break the association whenever
	 * more than one thread blocks on a given synchronization object.
	 * From a memory-management standpoint, turnstiles are like the
	 * "attached mblks" that hang off dblks in the streams allocator.
	 */
	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	if (stk == NULL) {
		/*
		 * alloc both thread and stack in segkp chunk
		 */

		if (stksize < default_stksize)
			stksize = default_stksize;

		if (stksize == default_stksize) {
			stk = (caddr_t)segkp_cache_get(segkp_thread);
		} else {
			stksize = roundup(stksize, PAGESIZE);
			stk = (caddr_t)segkp_get(segkp, stksize,
			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
		}

		ASSERT(stk != NULL);

		/*
		 * The machine-dependent mutex code may require that
		 * thread pointers (since they may be used for mutex owner
		 * fields) have certain alignment requirements.
		 * PTR24_ALIGN is the size of the alignment quanta.
		 * XXX - assumes stack grows toward low addresses.
		 */
		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
			cmn_err(CE_PANIC, "thread_create: proposed stack size"
			    " too small to hold thread.");
#ifdef STACK_GROWTH_DOWN
		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
		stksize &= -PTR24_ALIGN;	/* make thread aligned */
		t = (kthread_t *)(stk + stksize);
		bzero(t, sizeof (kthread_t));
#ifdef C2_AUDIT
		if (audit_active)
			audit_thread_create(t);
#endif
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else	/* stack grows to larger addresses */
		stksize -= SA(sizeof (kthread_t));
		t = (kthread_t *)(stk);
		bzero(t, sizeof (kthread_t));
		t->t_stk = stk + sizeof (kthread_t);
		t->t_stkbase = stk + stksize + sizeof (kthread_t);
#endif	/* STACK_GROWTH_DOWN */
		t->t_flag |= T_TALLOCSTK;
		t->t_swap = stk;
	} else {
		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
		bzero(t, sizeof (kthread_t));
		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
#ifdef C2_AUDIT
		if (audit_active)
			audit_thread_create(t);
#endif
		/*
		 * Initialize t_stk to the kernel stack pointer to use
		 * upon entry to the kernel
		 */
#ifdef STACK_GROWTH_DOWN
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else
		t->t_stk = stk;			/* 3b2-like */
		t->t_stkbase = stk + stksize;
#endif	/* STACK_GROWTH_DOWN */
	}

	/* set default stack flag */
	if (stksize == lwp_default_stksize)
		t->t_flag |= T_DFLTSTK;

	t->t_ts = ts;

	/*
	 * p_cred could be NULL if thread_create() is called before cred_init()
	 * is called in main.
	 */
	mutex_enter(&pp->p_crlock);
	if (pp->p_cred)
		crhold(t->t_cred = pp->p_cred);
	mutex_exit(&pp->p_crlock);
	t->t_start = gethrestime_sec();
	t->t_startpc = proc;
	t->t_procp = pp;
	t->t_clfuncs = &sys_classfuncs.thread;
	t->t_cid = syscid;
	t->t_pri = pri;
	t->t_stime = lbolt;
	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t->t_bind_cpu = PBIND_NONE;
	t->t_bind_pset = PS_NONE;
	t->t_plockp = &pp->p_lock;
	t->t_copyops = NULL;
	t->t_taskq = NULL;
	t->t_anttime = 0;
	t->t_hatdepth = 0;

	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */

	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
#ifndef NPROBE
	/* Kernel probe */
	tnf_thread_create(t);
#endif /* NPROBE */
	LOCK_INIT_CLEAR(&t->t_lock);

	/*
	 * Callers who give us a NULL proc must do their own
	 * stack initialization.  e.g. lwp_create()
	 */
	if (proc != NULL) {
		t->t_stk = thread_stk_init(t->t_stk);
		thread_load(t, proc, arg, len);
	}

	/*
	 * Put a hold on project0. If this thread is actually in a
	 * different project, then t_proj will be changed later in
	 * lwp_create().  All kernel-only threads must be in project 0.
	 */
	t->t_proj = project_hold(proj0p);

	lgrp_affinity_init(&t->t_lgrp_affinity);

	mutex_enter(&pidlock);
	nthread++;
	t->t_did = next_t_id++;
	t->t_prev = curthread->t_prev;
	t->t_next = curthread;

	/*
	 * Add the thread to the list of all threads, and initialize
	 * its t_cpu pointer.  We need to block preemption since
	 * cpu_offline walks the thread list looking for threads
	 * with t_cpu pointing to the CPU being offlined.  We want
	 * to make sure that the list is consistent and that if t_cpu
	 * is set, the thread is on the list.
	 */
	kpreempt_disable();
	curthread->t_prev->t_next = t;
	curthread->t_prev = t;

	/*
	 * Threads should never have a NULL t_cpu pointer so assign it
	 * here.  If the thread is being created with state TS_RUN a
	 * better CPU may be chosen when it is placed on the run queue.
	 *
	 * We need to keep kernel preemption disabled when setting all
	 * three fields to keep them in sync.  Also, always create in
	 * the default partition since that's where kernel threads go
	 * (if this isn't a kernel thread, t_cpupart will be changed
	 * in lwp_create before setting the thread runnable).
	 */
	t->t_cpupart = &cp_default;

	/*
	 * For now, affiliate this thread with the root lgroup.
	 * Since the kernel does not (presently) allocate its memory
	 * in a locality aware fashion, the root is an appropriate home.
	 * If this thread is later associated with an lwp, it will have
	 * its lgroup re-assigned at that time.
	 */
	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

	/*
	 * Inherit the current cpu.  If this cpu isn't part of the chosen
	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
	 * is ready to run.
	 */
	if (CPU->cpu_part == &cp_default)
		t->t_cpu = CPU;
	else
		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
		    t->t_pri, NULL);

	t->t_disp_queue = t->t_cpu->cpu_disp;
	kpreempt_enable();

	/*
	 * Initialize thread state and the dispatcher lock pointer.
	 * Need to hold onto pidlock to block allthreads walkers until
	 * the state is set.
	 */
	switch (state) {
	case TS_RUN:
		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
		CL_SETRUN(t);
		thread_unlock(t);
		break;

	case TS_ONPROC:
		THREAD_ONPROC(t, t->t_cpu);
		break;

	case TS_FREE:
		/*
		 * Free state will be used for intr threads.
		 * The interrupt routine must set the thread dispatcher
		 * lock pointer (t_lockp) if starting on a CPU
		 * other than the current one.
		 */
		THREAD_FREEINTR(t, CPU);
		break;

	case TS_STOPPED:
		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
		break;

	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
	}
	mutex_exit(&pidlock);
	return (t);
}
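
/*
 * Usage sketch (illustrative only, not part of the original source).
 * A typical kernel daemon is created in the system class at a fixed
 * priority and simply calls thread_exit() when its work is done; the
 * worker function below is hypothetical:
 *
 *	static void
 *	my_worker(void)
 *	{
 *		... do work ...
 *		thread_exit();
 *		// NOTREACHED
 *	}
 *
 *	kthread_t *t = thread_create(NULL, 0, my_worker, NULL, 0, &p0,
 *	    TS_RUN, minclsyspri);
 *
 * Passing a NULL stack and a zero stksize asks thread_create() to
 * carve both the stack and the kthread_t out of a single segkp chunk
 * of default_stksize bytes, as implemented above.
 */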

/*
 * Move thread to project0 and take care of project reference counters.
 */
void
thread_rele(kthread_t *t)
{
	kproject_t *kpj;

	thread_lock(t);

	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
	kpj = ttoproj(t);
	t->t_proj = proj0p;

	thread_unlock(t);

	if (kpj != proj0p) {
		project_rele(kpj);
		(void) project_hold(proj0p);
	}
}

/*
 * This is a function which is called from thread_exit
 * that can be used to debug reference count issues in IP.
 */
void (*ip_cleanup_func)(void);

void
thread_exit()
{
	kthread_t *t = curthread;

	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");

	if (ip_cleanup_func != NULL)
		(*ip_cleanup_func)();

	tsd_exit();		/* Clean up this thread's TSD */

	kcpc_passivate();	/* clean up performance counter state */

	/*
	 * No kernel thread should have called poll() without arranging
	 * to call pollcleanup() here.
	 */
	ASSERT(t->t_pollstate == NULL);
	ASSERT(t->t_schedctl == NULL);
	if (t->t_door)
		door_slam();	/* in case thread did an upcall */

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	thread_rele(t);
	t->t_preempt++;

	/*
	 * remove thread from the all threads list so that
	 * death-row can use the same pointers.
	 */
	mutex_enter(&pidlock);
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	ASSERT(allthreads != t);	/* t0 never exits */
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	if (t->t_ctx != NULL)
		exitctx(t);
	if (t->t_procp->p_pctx != NULL)
		exitpctx(t->t_procp);

	t->t_state = TS_ZOMB;	/* set zombie thread */

	swtch_from_zombie();	/* give up the CPU */
	/* NOTREACHED */
}

/*
 * Check to see if the specified thread is active (defined as being on
 * the thread list).  This is certainly a slow way to do this; if there's
 * ever a reason to speed it up, we could maintain a hash table of active
 * threads indexed by their t_did.
 */
static kthread_t *
did_to_thread(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&pidlock));
	for (t = curthread->t_next; t != curthread; t = t->t_next) {
		if (t->t_did == tid)
			break;
	}
	if (t->t_did == tid)
		return (t);
	else
		return (NULL);
}

/*
 * Wait for specified thread to exit.  Returns immediately if the thread
 * could not be found, meaning that it has either already exited or never
 * existed.
 */
void
thread_join(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(tid != curthread->t_did);
	ASSERT(tid != t0.t_did);

	mutex_enter(&pidlock);
	/*
	 * Make sure we check that the thread is on the thread list
	 * before blocking on it; otherwise we could end up blocking on
	 * a cv that's already been freed.  In other words, don't cache
	 * the thread pointer across calls to cv_wait.
	 *
	 * The choice of loop invariant means that whenever a thread
	 * is taken off the allthreads list, a cv_broadcast must be
	 * performed on that thread's t_joincv to wake up any waiters.
	 * The broadcast doesn't have to happen right away, but it
	 * shouldn't be postponed indefinitely (e.g., by doing it in
	 * thread_free, which may only be executed when the deathrow
	 * queue is processed).
	 */
	while (t = did_to_thread(tid))
		cv_wait(&t->t_joincv, &pidlock);
	mutex_exit(&pidlock);
}
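
/*
 * Illustrative note (not part of the original source): a caller that
 * wants to wait for a thread it created should record the thread ID
 * and wait on that, never on a cached kthread_t pointer, since the
 * structure is recycled once the thread is reaped.  A minimal sketch
 * with a hypothetical worker:
 *
 *	kthread_t *t = thread_create(NULL, 0, my_worker, NULL, 0, &p0,
 *	    TS_RUN, minclsyspri);
 *	kt_did_t tid = t->t_did;
 *	...
 *	thread_join(tid);	// returns once the worker has exited
 */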

void
thread_free(kthread_t *t)
{
	ASSERT(t != &t0 && t->t_state == TS_FREE);
	ASSERT(t->t_door == NULL);
	ASSERT(t->t_schedctl == NULL);
	ASSERT(t->t_pollstate == NULL);

	t->t_pri = 0;
	t->t_pc = 0;
	t->t_sp = 0;
	t->t_wchan0 = NULL;
	t->t_wchan = NULL;
	if (t->t_cred != NULL) {
		crfree(t->t_cred);
		t->t_cred = 0;
	}
	if (t->t_pdmsg) {
		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
		t->t_pdmsg = NULL;
	}
#ifdef C2_AUDIT
	if (audit_active)
		audit_thread_free(t);
#endif
#ifndef NPROBE
	if (t->t_tnf_tpdp)
		tnf_thread_free(t);
#endif /* NPROBE */
	if (t->t_cldata) {
		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
	}
	if (t->t_rprof != NULL) {
		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
		t->t_rprof = NULL;
	}
	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
	if (t->t_lwp)
		lwp_freeregs(t->t_lwp, 0);
	if (t->t_ctx)
		freectx(t, 0);
	t->t_stk = NULL;
	if (t->t_lwp)
		lwp_stk_fini(t->t_lwp);
	lock_clear(&t->t_lock);

	if (t->t_ts->ts_waiters > 0)
		panic("thread_free: turnstile still active");

	kmem_cache_free(turnstile_cache, t->t_ts);

	free_afd(&t->t_activefd);

	/*
	 * Barrier for clock thread.  The clock holds this lock to
	 * keep the thread from going away while it's looking at it.
	 */
	mutex_enter(&thread_free_lock);
	mutex_exit(&thread_free_lock);

	ASSERT(ttoproj(t) == proj0p);
	project_rele(ttoproj(t));

	lgrp_affinity_free(&t->t_lgrp_affinity);

	/*
	 * Free thread struct and its stack.
	 */
	if (t->t_flag & T_TALLOCSTK) {
		/* thread struct is embedded in stack */
		segkp_release(segkp, t->t_swap);
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
	} else {
		if (t->t_swap) {
			segkp_release(segkp, t->t_swap);
			t->t_swap = NULL;
		}
		if (t->t_lwp) {
			kmem_cache_free(lwp_cache, t->t_lwp);
			t->t_lwp = NULL;
		}
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
		kmem_cache_free(thread_cache, t);
	}
}

/*
 * Removes threads associated with the given zone from a deathrow queue.
 * tp is a pointer to the head of the deathrow queue, and countp is a
 * pointer to the current deathrow count.  Returns a linked list of
 * threads removed from the list.
 */
static kthread_t *
thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
{
	kthread_t *tmp, *list = NULL;
	cred_t *cr;

	ASSERT(MUTEX_HELD(&reaplock));
	while (*tp != NULL) {
		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
			tmp = *tp;
			*tp = tmp->t_forw;
			tmp->t_forw = list;
			list = tmp;
			(*countp)--;
		} else {
			tp = &(*tp)->t_forw;
		}
	}
	return (list);
}

static void
thread_reap_list(kthread_t *t)
{
	kthread_t *next;

	while (t != NULL) {
		next = t->t_forw;
		thread_free(t);
		t = next;
	}
}

/* ARGSUSED */
static void
thread_zone_destroy(zoneid_t zoneid, void *unused)
{
	kthread_t *t, *l;

	mutex_enter(&reaplock);
	/*
	 * Pull threads and lwps associated with zone off deathrow lists.
	 */
	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
	mutex_exit(&reaplock);

	/*
	 * Reap threads
	 */
	thread_reap_list(t);

	/*
	 * Reap lwps
	 */
	thread_reap_list(l);
}

/*
 * Clean up zombie threads that are on deathrow.
 */
void
thread_reaper()
{
	kthread_t *t, *l;
	callb_cpr_t cprinfo;

	/*
	 * Register callback to clean up threads when zone is destroyed.
	 */
	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
	for (;;) {
		mutex_enter(&reaplock);
		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&reaper_cv, &reaplock);
			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
		}
		t = thread_deathrow;
		l = lwp_deathrow;
		thread_deathrow = NULL;
		lwp_deathrow = NULL;
		thread_reapcnt = 0;
		lwp_reapcnt = 0;
		mutex_exit(&reaplock);

		/*
		 * Reap threads
		 */
		thread_reap_list(t);

		/*
		 * Reap lwps
		 */
		thread_reap_list(l);
	}
}

/*
 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is reapable.
 * This is called from the idle thread so it must not block (just spin).
 */
void
reapq_add(kthread_t *t)
{
	mutex_enter(&reaplock);

	/*
	 * lwp_deathrow contains only threads with lwp linkage
	 * that are of the default stacksize. Anything else goes
	 * on thread_deathrow.
	 */
	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
		t->t_forw = lwp_deathrow;
		lwp_deathrow = t;
		lwp_reapcnt++;
	} else {
		t->t_forw = thread_deathrow;
		thread_deathrow = t;
		thread_reapcnt++;
	}
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
	t->t_state = TS_FREE;
	lock_clear(&t->t_lock);

	/*
	 * Before we return, we need to grab and drop the thread lock for
	 * the dead thread.  At this point, the current thread is the idle
	 * thread, and the dead thread's CPU lock points to the current
	 * CPU -- and we must grab and drop the lock to synchronize with
	 * a racing thread walking a blocking chain that the zombie thread
	 * was recently in.  By this point, that blocking chain is (by
	 * definition) stale:  the dead thread is not holding any locks, and
	 * is therefore not in any blocking chains -- but if we do not regrab
	 * our lock before freeing the dead thread's data structures, the
	 * thread walking the (stale) blocking chain will die on memory
	 * corruption when it attempts to drop the dead thread's lock.  We
	 * only need do this once because there is no way for the dead thread
	 * to ever again be on a blocking chain:  once we have grabbed and
	 * dropped the thread lock, we are guaranteed that anyone that could
	 * have seen this thread in a blocking chain can no longer see it.
	 */
	thread_lock(t);
	thread_unlock(t);

	mutex_exit(&reaplock);
}

/*
 * Install thread context ops for the current thread.
 */
void
installctx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx;

	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
	ctx->save_op = save;
	ctx->restore_op = restore;
	ctx->fork_op = fork;
	ctx->lwp_create_op = lwp_create;
	ctx->exit_op = exit;
	ctx->free_op = free;
	ctx->arg = arg;
	ctx->next = t->t_ctx;
	t->t_ctx = ctx;
}
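
/*
 * Usage sketch (illustrative only, not part of the original source).
 * A subsystem that keeps per-thread hardware or software state can
 * hang that state off the current thread with installctx() and tear
 * it down with a matching removectx() call; the callback names and
 * the my_state argument below are hypothetical:
 *
 *	installctx(curthread, my_state,
 *	    my_save,		// invoked when the thread switches out
 *	    my_restore,		// invoked when the thread switches back in
 *	    NULL,		// fork_op
 *	    NULL,		// lwp_create_op
 *	    my_exit,		// invoked from thread_exit()/lwp_exit()
 *	    my_free);		// invoked from thread_free() or exec()
 *
 *	...
 *
 *	(void) removectx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, my_exit, my_free);
 *
 * removectx() matches on the full set of ops plus the arg, so the same
 * function pointers and argument must be passed to both calls.
 */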

/*
 * Remove the thread context ops from a thread.
 */
int
removectx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx, *prev_ctx;

	/*
	 * The incoming kthread_t (which is the thread for which the
	 * context ops will be removed) should be one of the following:
	 *
	 * a) the current thread,
	 *
	 * b) a thread of a process that's being forked (SIDL),
	 *
	 * c) a thread that belongs to the same process as the current
	 *    thread and for which the current thread is the agent thread,
	 *
	 * d) a thread that is TS_STOPPED which is indicative of it
	 *    being (if curthread is not an agent) a thread being created
	 *    as part of an lwp creation.
	 */
	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	/*
	 * Serialize modifications to t->t_ctx to prevent the agent thread
	 * and the target thread from racing with each other during lwp exit.
	 */
	mutex_enter(&t->t_ctx_lock);
	prev_ctx = NULL;
	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
		if (ctx->save_op == save && ctx->restore_op == restore &&
		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
		    ctx->exit_op == exit && ctx->free_op == free &&
		    ctx->arg == arg) {
			if (prev_ctx)
				prev_ctx->next = ctx->next;
			else
				t->t_ctx = ctx->next;
			mutex_exit(&t->t_ctx_lock);
			if (ctx->free_op != NULL)
				(ctx->free_op)(ctx->arg, 0);
			kmem_free(ctx, sizeof (struct ctxop));
			return (1);
		}
		prev_ctx = ctx;
	}
	mutex_exit(&t->t_ctx_lock);

	return (0);
}

void
savectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->save_op != NULL)
			(ctx->save_op)(ctx->arg);
}

void
restorectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->restore_op != NULL)
			(ctx->restore_op)(ctx->arg);
}

void
forkctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->fork_op != NULL)
			(ctx->fork_op)(t, ct);
}

/*
 * Note that this operator is only invoked via the _lwp_create
 * system call.  The system may have other reasons to create lwps
 * e.g. the agent lwp or the doors unreferenced lwp.
 */
void
lwp_createctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->lwp_create_op != NULL)
			(ctx->lwp_create_op)(t, ct);
}

/*
 * exitctx is called from thread_exit() and lwp_exit() to perform any actions
 * needed when the thread/LWP leaves the processor for the last time. This
 * routine is not intended to deal with freeing memory; freectx() is used for
 * that purpose during thread_free(). This routine is provided to allow for
 * clean-up that can't wait until thread_free().
 */
void
exitctx(kthread_t *t)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->exit_op != NULL)
			(ctx->exit_op)(t);
}

/*
 * freectx is called from thread_free() and exec() to get
 * rid of old thread context ops.
 */
void
freectx(kthread_t *t, int isexec)
{
	struct ctxop *ctx;

	while ((ctx = t->t_ctx) != NULL) {
		t->t_ctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, isexec);
		kmem_free(ctx, sizeof (struct ctxop));
	}
}

/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	if (t->t_state == TS_SLEEP) {
		/*
		 * Take off sleep queue.
		 */
		SOBJ_UNSLEEP(t->t_sobj_ops, t);
	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
		/*
		 * Already on dispatcher queue.
/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	if (t->t_state == TS_SLEEP) {
		/*
		 * Take off sleep queue.
		 */
		SOBJ_UNSLEEP(t->t_sobj_ops, t);
	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
		/*
		 * Already on dispatcher queue.
		 */
		return;
	} else if (t->t_state == TS_WAIT) {
		waitq_setrun(t);
	} else if (t->t_state == TS_STOPPED) {
		/*
		 * All of the sending of SIGCONT (TC_XSTART) and /proc
		 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
		 * requested that the thread be run.
		 * Just calling setrun() is not sufficient to set a stopped
		 * thread running.  TP_TXSTART is always set if the thread
		 * is not stopped by a jobcontrol stop signal.
		 * TP_TPSTART is always set if /proc is not controlling it.
		 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
		 * The thread won't be stopped unless one of these
		 * three mechanisms did it.
		 *
		 * These flags must be set before calling setrun_locked(t).
		 * They can't be passed as arguments because the streams
		 * code calls setrun() indirectly and the mechanism for
		 * doing so admits only one argument.  Note that the
		 * thread must be locked in order to change t_schedflags.
		 */
		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
			return;
		/*
		 * Process is no longer stopped (a thread is running).
		 */
		t->t_whystop = 0;
		t->t_whatstop = 0;
		/*
		 * Strictly speaking, we do not have to clear these
		 * flags here; they are cleared on entry to stop().
		 * However, they are confusing when doing kernel
		 * debugging or when they are revealed by ps(1).
		 */
		t->t_schedflag &= ~TS_ALLSTART;
		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
		ASSERT(t->t_lockp == &transition_lock);
		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
		/*
		 * Let the class put the process on the dispatcher queue.
		 */
		CL_SETRUN(t);
	}
}

void
setrun(kthread_t *t)
{
	thread_lock(t);
	setrun_locked(t);
	thread_unlock(t);
}
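/*
 * Illustrative sketch (not in the original source): as the comment in
 * setrun_locked() explains, a stopped thread only starts running again
 * once every outstanding start condition has been recorded in t_schedflag.
 * A continue path therefore sets its own start bit under the thread lock
 * before calling setrun_locked(); TS_CSTART is used here as the
 * lwp_continue() case, and the thread actually runs only once all of the
 * TS_ALLSTART bits are present.  The helper name is hypothetical.
 */
static void
example_continue(kthread_t *t)
{
	thread_lock(t);
	t->t_schedflag |= TS_CSTART;	/* our reason for stopping is gone */
	setrun_locked(t);		/* no-op until TS_ALLSTART is set */
	thread_unlock(t);
}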
/*
 * Unpin an interrupted thread.
 *	When an interrupt occurs, the interrupt is handled on the stack
 *	of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 *	When swtch() is switching away from an interrupt thread because it
 *	blocked or was preempted, this routine is called to complete the
 *	saving of the interrupted thread state, and returns the interrupted
 *	thread pointer so it may be resumed.
 *
 *	Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
	kthread_t	*t = curthread;	/* current thread */
	kthread_t	*itp;		/* interrupted thread */
	int		i;		/* interrupt level */
	extern int	intr_passivate();

	ASSERT(t->t_intr != NULL);

	itp = t->t_intr;		/* interrupted thread */
	t->t_intr = NULL;		/* clear interrupt ptr */

	/*
	 * Get state from interrupt thread for the one
	 * it interrupted.
	 */

	i = intr_passivate(t, itp);

	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
	    i, t, t, itp, itp);

	/*
	 * Dissociate the current thread from the interrupted thread's LWP.
	 */
	t->t_lwp = NULL;

	/*
	 * Interrupt handlers above the level that spinlocks block must
	 * not block.
	 */
#if DEBUG
	if (i < 0 || i > LOCK_LEVEL)
		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
#endif

	/*
	 * Compute the CPU's base interrupt level based on the active
	 * interrupts.
	 */
	ASSERT(CPU->cpu_intr_actv & (1 << i));
	set_base_spl();

	return (itp);
}
/*
 * Create and initialize an interrupt thread.
 *	Does not return a value; the new thread is linked onto the
 *	CPU's interrupt thread pool.
 *	Called at spl7() or better.
 */
void
thread_create_intr(struct cpu *cp)
{
	kthread_t *tp;

	tp = thread_create(NULL, 0,
	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

	/*
	 * Set the thread in the TS_FREE state.  The state will change
	 * to TS_ONPROC only while the interrupt is active.  Think of these
	 * as being on a private free list for the CPU.  Being TS_FREE keeps
	 * inactive interrupt threads out of debugger thread lists.
	 *
	 * We cannot call thread_create with TS_FREE because of the current
	 * checks there for ONPROC.  Fix this when thread_create takes flags.
	 */
	THREAD_FREEINTR(tp, cp);

	/*
	 * Nobody should ever reference the credentials of an interrupt
	 * thread so make it NULL to catch any such references.
	 */
	tp->t_cred = NULL;
	tp->t_flag |= T_INTR_THREAD;
	tp->t_cpu = cp;
	tp->t_bound_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;
	tp->t_affinitycnt = 1;
	tp->t_preempt = 1;

	/*
	 * Don't make a user-requested binding on this thread so that
	 * the processor can be offlined.
	 */
	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
	tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
	tp->t_stk -= STACK_ALIGN;
	*(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

	/*
	 * Link onto CPU's interrupt pool.
	 */
	tp->t_link = cp->cpu_intr_thread;
	cp->cpu_intr_thread = tp;
}
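/*
 * Illustrative sketch (not in the original source): CPU bring-up code
 * typically pre-populates the per-CPU interrupt thread pool
 * (cp->cpu_intr_thread) by calling thread_create_intr() once per
 * low-level interrupt thread the CPU may need to service concurrently.
 * The helper and the count argument are hypothetical.
 */
static void
example_populate_intr_pool(struct cpu *cp, int nthreads)
{
	int i;

	for (i = 0; i < nthreads; i++)
		thread_create_intr(cp);	/* linked onto cp->cpu_intr_thread */
}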
/*
 * TSD -- THREAD SPECIFIC DATA
 */
static kmutex_t		tsd_mutex;	 /* linked list spin lock */
static uint_t		tsd_nkeys;	 /* size of destructor array */
/* per-key destructor funcs */
static void		(**tsd_destructor)(void *);
/* list of tsd_thread's */
static struct tsd_thread	*tsd_list;

/*
 * Default destructor
 *	Needed because NULL destructor means that the key is unused
 */
/* ARGSUSED */
void
tsd_defaultdestructor(void *value)
{}

/*
 * Create a key (index into per thread array)
 *	Locks out tsd_create, tsd_destroy, and tsd_exit
 *	May allocate memory with lock held
 */
void
tsd_create(uint_t *keyp, void (*destructor)(void *))
{
	int	i;
	uint_t	nkeys;

	/*
	 * if key is allocated, do nothing
	 */
	mutex_enter(&tsd_mutex);
	if (*keyp) {
		mutex_exit(&tsd_mutex);
		return;
	}
	/*
	 * find an unused key
	 */
	if (destructor == NULL)
		destructor = tsd_defaultdestructor;

	for (i = 0; i < tsd_nkeys; ++i)
		if (tsd_destructor[i] == NULL)
			break;

	/*
	 * if no unused keys, increase the size of the destructor array
	 */
	if (i == tsd_nkeys) {
		if ((nkeys = (tsd_nkeys << 1)) == 0)
			nkeys = 1;
		tsd_destructor =
		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
		    (size_t)(nkeys * sizeof (void (*)(void *))));
		tsd_nkeys = nkeys;
	}

	/*
	 * allocate the next available unused key
	 */
	tsd_destructor[i] = destructor;
	*keyp = i + 1;
	mutex_exit(&tsd_mutex);
}

/*
 * Destroy a key -- this is for unloadable modules
 *
 * Assumes that the caller is preventing tsd_set and tsd_get
 * Locks out tsd_create, tsd_destroy, and tsd_exit
 * May free memory with lock held
 */
void
tsd_destroy(uint_t *keyp)
{
	uint_t key;
	struct tsd_thread *tsd;

	/*
	 * protect the key namespace and our destructor lists
	 */
	mutex_enter(&tsd_mutex);
	key = *keyp;
	*keyp = 0;

	ASSERT(key <= tsd_nkeys);

	/*
	 * if the key is valid
	 */
	if (key != 0) {
		uint_t k = key - 1;
		/*
		 * for every thread with TSD, call key's destructor
		 */
		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
			/*
			 * no TSD for key in this thread
			 */
			if (key > tsd->ts_nkeys)
				continue;
			/*
			 * call destructor for key
			 */
			if (tsd->ts_value[k] && tsd_destructor[k])
				(*tsd_destructor[k])(tsd->ts_value[k]);
			/*
			 * reset value for key
			 */
			tsd->ts_value[k] = NULL;
		}
		/*
		 * actually free the key (NULL destructor == unused)
		 */
		tsd_destructor[k] = NULL;
	}

	mutex_exit(&tsd_mutex);
}

/*
 * Quickly return the per thread value that was stored with the specified key
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 */
void *
tsd_get(uint_t key)
{
	return (tsd_agent_get(curthread, key));
}
/*
 * Set a per thread value indexed with the specified key
 */
int
tsd_set(uint_t key, void *value)
{
	return (tsd_agent_set(curthread, key, value));
}

/*
 * Like tsd_get(), except that the agent lwp can get the tsd of
 * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or syslwp is creating a new lwp.
 */
void *
tsd_agent_get(kthread_t *t, uint_t key)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key && tsd != NULL && key <= tsd->ts_nkeys)
		return (tsd->ts_value[key - 1]);
	return (NULL);
}

/*
 * Like tsd_set(), except that the agent lwp can set the tsd of
 * another thread in the same process, or syslwp can set the tsd
 * of a thread it's in the middle of creating.
 *
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 * May lock out tsd_destroy (and tsd_create), may allocate memory with
 * lock held
 */
int
tsd_agent_set(kthread_t *t, uint_t key, void *value)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key == 0)
		return (EINVAL);
	if (tsd == NULL)
		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key <= tsd->ts_nkeys) {
		tsd->ts_value[key - 1] = value;
		return (0);
	}

	ASSERT(key <= tsd_nkeys);

	/*
	 * lock out tsd_destroy()
	 */
	mutex_enter(&tsd_mutex);
	if (tsd->ts_nkeys == 0) {
		/*
		 * Link onto list of threads with TSD
		 */
		if ((tsd->ts_next = tsd_list) != NULL)
			tsd_list->ts_prev = tsd;
		tsd_list = tsd;
	}

	/*
	 * Allocate thread local storage and set the value for key
	 */
	tsd->ts_value = tsd_realloc(tsd->ts_value,
	    tsd->ts_nkeys * sizeof (void *),
	    key * sizeof (void *));
	tsd->ts_nkeys = key;
	tsd->ts_value[key - 1] = value;
	mutex_exit(&tsd_mutex);

	return (0);
}
/*
 * Return the per thread value that was stored with the specified key
 *	If necessary, create the key and the value
 *	Assumes the caller is protecting *keyp from tsd_destroy
 */
void *
tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
{
	void *value;
	uint_t key = *keyp;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
		return (value);
	if (key == 0)
		tsd_create(keyp, destroy);
	(void) tsd_set(*keyp, value = (*allocate)());

	return (value);
}
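/*
 * Illustrative sketch (not in the original source): typical use of the
 * TSD interfaces above by a kernel module.  The key starts at zero
 * ("unused"); tsd_getcreate() creates it on first use, allocates this
 * thread's value if it has none yet, and the destructor reclaims the
 * value when the thread exits or the key is destroyed with
 * tsd_destroy(&example_tsd_key) (e.g. at module unload).  All "example_"
 * names are hypothetical.
 */
static uint_t example_tsd_key;		/* 0 until tsd_create() assigns it */

static void
example_tsd_destructor(void *value)
{
	kmem_free(value, sizeof (uint64_t));
}

static void *
example_tsd_allocate(void)
{
	return (kmem_zalloc(sizeof (uint64_t), KM_SLEEP));
}

static uint64_t *
example_tsd_counter(void)
{
	/* per-thread counter, created and attached lazily */
	return (tsd_getcreate(&example_tsd_key, example_tsd_destructor,
	    example_tsd_allocate));
}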
/*
 * Called from thread_exit() to run the destructor function for each tsd
 *	Locks out tsd_create and tsd_destroy
 *	Assumes that the destructor *DOES NOT* use tsd
 */
void
tsd_exit(void)
{
	int i;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		return;

	if (tsd->ts_nkeys == 0) {
		kmem_free(tsd, sizeof (*tsd));
		curthread->t_tsd = NULL;
		return;
	}

	/*
	 * lock out tsd_create and tsd_destroy, call
	 * the destructor, and mark the value as destroyed.
	 */
	mutex_enter(&tsd_mutex);

	for (i = 0; i < tsd->ts_nkeys; i++) {
		if (tsd->ts_value[i] && tsd_destructor[i])
			(*tsd_destructor[i])(tsd->ts_value[i]);
		tsd->ts_value[i] = NULL;
	}

	/*
	 * remove from linked list of threads with TSD
	 */
	if (tsd->ts_next)
		tsd->ts_next->ts_prev = tsd->ts_prev;
	if (tsd->ts_prev)
		tsd->ts_prev->ts_next = tsd->ts_next;
	if (tsd_list == tsd)
		tsd_list = tsd->ts_next;

	mutex_exit(&tsd_mutex);

	/*
	 * free up the TSD
	 */
	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
	kmem_free(tsd, sizeof (struct tsd_thread));
	curthread->t_tsd = NULL;
}

/*
 * realloc
 */
static void *
tsd_realloc(void *old, size_t osize, size_t nsize)
{
	void *new;

	new = kmem_zalloc(nsize, KM_SLEEP);
	if (old) {
		bcopy(old, new, osize);
		kmem_free(old, osize);
	}
	return (new);
}

/*
 * Check to see if an interrupt thread might be active at a given ipl.
 * If so return true.
 * We must be conservative--it is ok to give a false yes, but a false no
 * will cause disaster.  (But if the situation changes after we check it is
 * ok--the caller is trying to ensure that an interrupt routine has been
 * exited).
 * This is used when trying to remove an interrupt handler from an autovector
 * list in avintr.c.
 */
int
intr_active(struct cpu *cp, int level)
{
	if (level <= LOCK_LEVEL)
		return (cp->cpu_thread != cp->cpu_dispthread);
	else
		return (CPU_ON_INTR(cp));
}
/*
 * Return non-zero if an interrupt is being serviced.
 */
int
servicing_interrupt()
{
	int onintr = 0;

	/* Are we an interrupt thread */
	if (curthread->t_flag & T_INTR_THREAD)
		return (1);
	/* Are we servicing a high level interrupt? */
	if (CPU_ON_INTR(CPU)) {
		kpreempt_disable();
		onintr = CPU_ON_INTR(CPU);
		kpreempt_enable();
	}
	return (onintr);
}
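/*
 * Illustrative sketch (not in the original source): code that may be
 * reached both from ordinary thread context and from interrupt context
 * can consult servicing_interrupt() to decide whether it is allowed to
 * block, e.g. to pick the allocation policy.  The helper name is
 * hypothetical.
 */
static void *
example_alloc(size_t size)
{
	/* never sleep while an interrupt is being serviced */
	return (kmem_alloc(size, servicing_interrupt() ?
	    KM_NOSLEEP : KM_SLEEP));
}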
/*
 * Change the dispatch priority of a thread in the system.
 * Used when raising or lowering a thread's priority.
 * (E.g., priority inheritance)
 *
 * Since threads are queued according to their priority, we
 * must check the thread's state to determine whether it
 * is on a queue somewhere. If it is, we've got to:
 *
 *	o Dequeue the thread.
 *	o Change its effective priority.
 *	o Enqueue the thread.
 *
 * Assumptions: The thread whose priority we wish to change
 * must be locked before we call thread_change_(e)pri().
 * The thread_change(e)pri() function doesn't drop the thread
 * lock--that must be done by its caller.
 */
void
thread_change_epri(kthread_t *t, pri_t disp_pri)
{
	uint_t	state;

	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * If the inherited priority hasn't actually changed,
	 * just return.
	 */
	if (t->t_epri == disp_pri)
		return;

	state = t->t_state;

	/*
	 * If it's not on a queue, change the priority with
	 * impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_epri = disp_pri;

		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
		return;
	}

	/*
	 * It's either on a sleep queue or a run queue.
	 */
	if (state == TS_SLEEP) {
		/*
		 * Take the thread out of its sleep queue.
		 * Change the inherited priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * effective priority needs to change.
		 */
		if (disp_pri != t->t_epri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 */
		(void) dispdeq(t);
		t->t_epri = disp_pri;
		setbackdq(t);
	}
}	/* end of thread_change_epri */
/*
 * Function: Change the t_pri field of a thread.
 * Side Effects: Adjust the thread ordering on a run queue
 *		 or sleep queue, if necessary.
 * Returns: 1 if the thread was on a run queue, else 0.
 */
int
thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
{
	uint_t	state;
	int	on_rq = 0;

	ASSERT(THREAD_LOCK_HELD(t));

	state = t->t_state;
	THREAD_WILLCHANGE_PRI(t, disp_pri);

	/*
	 * If it's not on a queue, change the priority with
	 * impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_pri = disp_pri;

		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
		return (0);
	}

	/*
	 * It's either on a sleep queue or a run queue.
	 */
	if (state == TS_SLEEP) {
		/*
		 * If the priority has changed, take the thread out of
		 * its sleep queue and change the priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		if (disp_pri != t->t_pri)
			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * priority needs to change.
		 */
		if (disp_pri != t->t_pri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 *
		 * We still requeue the thread even if the priority
		 * is unchanged to preserve round-robin (and other)
		 * effects between threads of the same priority.
		 */
		on_rq = dispdeq(t);
		ASSERT(on_rq);
		t->t_pri = disp_pri;
		if (front) {
			setfrontdq(t);
		} else {
			setbackdq(t);
		}
	}
	return (on_rq);
}
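/*
 * Illustrative sketch (not in the original source): a caller such as a
 * scheduling class applies a newly computed dispatch priority while
 * holding the thread lock, as the assumptions above require; the lock is
 * dropped by the caller, not by thread_change_pri().  The helper and the
 * priority argument are hypothetical.
 */
static void
example_set_priority(kthread_t *t, pri_t newpri)
{
	thread_lock(t);
	(void) thread_change_pri(t, newpri, 0);	/* requeue at the back */
	thread_unlock(t);
}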