/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, Joyent, Inc.  All rights reserved.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/signal.h>
#include <sys/errno.h>
#include <sys/vmparam.h>
#include <sys/stack.h>
#include <sys/procfs.h>
#include <sys/prsystm.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/vtrace.h>
#include <sys/door.h>
#include <vm/seg_kp.h>
#include <sys/debug.h>
#include <sys/tnf.h>
#include <sys/schedctl.h>
#include <sys/poll.h>
#include <sys/copyops.h>
#include <sys/lwp_upimutex_impl.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/rctl.h>
#include <sys/contract_impl.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/brand.h>
#include <sys/cyclic.h>
#include <sys/pool.h>

/* hash function for the lwpid hash table, p->p_tidhash[] */
#define	TIDHASH(tid, hash_sz)	((tid) & ((hash_sz) - 1))

void *segkp_lwp;	/* cookie for pool of segkp resources */
extern void reapq_move_lq_to_tq(kthread_t *);
extern void freectx_ctx(struct ctxop *);

/*
 * Create a kernel thread associated with a particular system process.  Give
 * it an LWP so that microstate accounting will be available for it.
 */
kthread_t *
lwp_kernel_create(proc_t *p, void (*proc)(), void *arg, int state, pri_t pri)
{
        klwp_t *lwp;

        VERIFY((p->p_flag & SSYS) != 0);

        lwp = lwp_create(proc, arg, 0, p, state, pri, &t0.t_hold, syscid, 0);

        VERIFY(lwp != NULL);

        return (lwptot(lwp));
}
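
/*
 * Hypothetical usage sketch for lwp_kernel_create() (illustrative only;
 * `sysp', `worker_func' and `arg' are made-up names, not from this file):
 *
 *      kthread_t *t = lwp_kernel_create(sysp, worker_func, arg,
 *          TS_RUN, minclsyspri);
 *
 * The target process must already be a system (SSYS) process, as the
 * VERIFY() above enforces.
 */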

/*
 * Create a thread that appears to be stopped at sys_rtt.
 */
klwp_t *
lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p,
    int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid)
{
        klwp_t *lwp = NULL;
        kthread_t *t;
        kthread_t *tx;
        cpupart_t *oldpart = NULL;
        size_t stksize;
        caddr_t lwpdata = NULL;
        processorid_t binding;
        int err = 0;
        kproject_t *oldkpj, *newkpj;
        void *bufp = NULL;
        klwp_t *curlwp;
        lwpent_t *lep;
        lwpdir_t *old_dir = NULL;
        uint_t old_dirsz = 0;
        tidhash_t *old_hash = NULL;
        uint_t old_hashsz = 0;
        ret_tidhash_t *ret_tidhash = NULL;
        int i;
        int rctlfail = 0;
        boolean_t branded = 0;
        struct ctxop *ctx = NULL;

        ASSERT(cid != sysdccid);        /* system threads must start in SYS */

        ASSERT(p != &p0);               /* No new LWPs in p0. */

        mutex_enter(&p->p_lock);
        mutex_enter(&p->p_zone->zone_nlwps_lock);
        /*
         * don't enforce rctl limits on system processes
         */
        if (!CLASS_KERNEL(cid)) {
                if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl)
                        if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p,
                            1, 0) & RCT_DENY)
                                rctlfail = 1;
                if (p->p_task->tk_proj->kpj_nlwps >=
                    p->p_task->tk_proj->kpj_nlwps_ctl)
                        if (rctl_test(rc_project_nlwps,
                            p->p_task->tk_proj->kpj_rctls, p, 1, 0)
                            & RCT_DENY)
                                rctlfail = 1;
                if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl)
                        if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p,
                            1, 0) & RCT_DENY)
                                rctlfail = 1;
        }
        if (rctlfail) {
                mutex_exit(&p->p_zone->zone_nlwps_lock);
                mutex_exit(&p->p_lock);
                atomic_inc_32(&p->p_zone->zone_ffcap);
                return (NULL);
        }
        p->p_task->tk_nlwps++;
        p->p_task->tk_proj->kpj_nlwps++;
        p->p_zone->zone_nlwps++;
        mutex_exit(&p->p_zone->zone_nlwps_lock);
        mutex_exit(&p->p_lock);

        curlwp = ttolwp(curthread);
        if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0)
                stksize = lwp_default_stksize;

        if (CLASS_KERNEL(cid)) {
                /*
                 * Since we are creating an LWP in an SSYS process, we do not
                 * inherit anything from the current thread's LWP.  We set
                 * stksize and lwpdata to 0 in order to let thread_create()
                 * allocate a regular kernel thread stack for this thread.
                 */
                curlwp = NULL;
                stksize = 0;
                lwpdata = NULL;

        } else if (stksize == lwp_default_stksize) {
                /*
                 * Try to reuse an <lwp,stack> from the LWP deathrow.
                 */
                if (lwp_reapcnt > 0) {
                        mutex_enter(&reaplock);
                        if ((t = lwp_deathrow) != NULL) {
                                ASSERT(t->t_swap);
                                lwp_deathrow = t->t_forw;
                                lwp_reapcnt--;
                                lwpdata = t->t_swap;
                                lwp = t->t_lwp;
                                ctx = t->t_ctx;
                                t->t_swap = NULL;
                                t->t_lwp = NULL;
                                t->t_ctx = NULL;
                                reapq_move_lq_to_tq(t);
                        }
                        mutex_exit(&reaplock);
                        if (lwp != NULL) {
                                lwp_stk_fini(lwp);
                        }
                        if (ctx != NULL) {
                                freectx_ctx(ctx);
                        }
                }
                if (lwpdata == NULL &&
                    (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) {
                        mutex_enter(&p->p_lock);
                        mutex_enter(&p->p_zone->zone_nlwps_lock);
                        p->p_task->tk_nlwps--;
                        p->p_task->tk_proj->kpj_nlwps--;
                        p->p_zone->zone_nlwps--;
                        mutex_exit(&p->p_zone->zone_nlwps_lock);
                        mutex_exit(&p->p_lock);
                        atomic_inc_32(&p->p_zone->zone_ffnomem);
                        return (NULL);
                }
        } else {
                stksize = roundup(stksize, PAGESIZE);
                if ((lwpdata = (caddr_t)segkp_get(segkp, stksize,
                    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) {
                        mutex_enter(&p->p_lock);
                        mutex_enter(&p->p_zone->zone_nlwps_lock);
                        p->p_task->tk_nlwps--;
                        p->p_task->tk_proj->kpj_nlwps--;
                        p->p_zone->zone_nlwps--;
                        mutex_exit(&p->p_zone->zone_nlwps_lock);
                        mutex_exit(&p->p_lock);
                        atomic_inc_32(&p->p_zone->zone_ffnomem);
                        return (NULL);
                }
        }

        /*
         * Create a thread, initializing the stack pointer
         */
        t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri);

        /*
         * If a non-NULL stack base is passed in, thread_create() assumes
         * that the stack might be statically allocated (as opposed to being
         * allocated from segkp), and so it does not set t_swap.  Since
         * the lwpdata was allocated from segkp, we must set t_swap to point
         * to it ourselves.
         *
         * This would be less confusing if t_swap had a better name; it really
         * indicates that the stack is allocated from segkp, regardless of
         * whether or not it is swappable.
         */
        if (lwpdata != NULL) {
                ASSERT(!CLASS_KERNEL(cid));
                ASSERT(t->t_swap == NULL);
                t->t_swap = lwpdata;    /* Start of page-able data */
        }

        /*
         * If the stack and lwp can be reused, mark the thread as such.
         * When we get to reapq_add() from resume_from_zombie(), these
         * threads will go onto lwp_deathrow instead of thread_deathrow.
         */
        if (!CLASS_KERNEL(cid) && stksize == lwp_default_stksize)
                t->t_flag |= T_LWPREUSE;

        if (lwp == NULL)
                lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP);
        bzero(lwp, sizeof (*lwp));
        t->t_lwp = lwp;

        t->t_hold = *smask;
        lwp->lwp_thread = t;
        lwp->lwp_procp = p;
        lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
        if (curlwp != NULL && curlwp->lwp_childstksz != 0)
                lwp->lwp_childstksz = curlwp->lwp_childstksz;

        t->t_stk = lwp_stk_init(lwp, t->t_stk);
        thread_load(t, proc, arg, len);

        /*
         * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect.
         */
        if (p->p_rprof_cyclic != CYCLIC_NONE)
                t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP);

        if (cid != NOCLASS)
                (void) CL_ALLOC(&bufp, cid, KM_SLEEP);

        /*
         * Allocate an lwp directory entry for the new lwp.
         */
        lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);

        mutex_enter(&p->p_lock);
grow:
        /*
         * Grow the lwp (thread) directory and lwpid hash table if necessary.
         * A note on the growth algorithm:
         *      The new lwp directory size is computed as:
         *              new = 2 * old + 2
         *      Starting with an initial size of 2 (see exec_common()),
         *      this yields numbers that are a power of two minus 2:
         *              2, 6, 14, 30, 62, 126, 254, 510, 1022, ...
         *      The size of the lwpid hash table must be a power of two
         *      and must be commensurate in size with the lwp directory
         *      so that hash bucket chains remain short.  Therefore,
         *      the lwpid hash table size is computed as:
         *              hashsz = (dirsz + 2) / 2
         *      which leads to these hash table sizes corresponding to
         *      the above directory sizes:
         *              2, 4, 8, 16, 32, 64, 128, 256, 512, ...
         * A note on growing the hash table:
         *      For performance reasons, code in lwp_unpark() does not
         *      acquire curproc->p_lock when searching the hash table.
         *      Rather, it calls lwp_hash_lookup_and_lock() which
         *      acquires only the individual hash bucket lock, taking
         *      care to deal with reallocation of the hash table
         *      during the time it takes to acquire the lock.
         *
         *      This is sufficient to protect the integrity of the
         *      hash table, but it requires us to acquire all of the
         *      old hash bucket locks before growing the hash table
         *      and to release them afterwards.  It also requires us
         *      not to free the old hash table because some thread
         *      in lwp_hash_lookup_and_lock() might still be trying
         *      to acquire the old bucket lock.
         *
         *      So we adopt the tactic of keeping all of the retired
         *      hash tables on a linked list, so they can be safely
         *      freed when the process exits or execs.
         *
         *      Because the hash table grows in powers of two, the
         *      total size of all of the hash tables will be slightly
         *      less than twice the size of the largest hash table.
         */
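        /*
         * Illustrative notes only, not code from this file:
         *
         * As a concrete instance of the formulas above, growing from
         * dirsz = 6 gives new_dirsz = 2 * 6 + 2 = 14 and
         * new_hashsz = (14 + 2) / 2 = 8.
         *
         * A lock-free reader pairing with the two membar_producer()
         * calls in the loop below might (hypothetically; see the real
         * lwp_hash_lookup_and_lock() for the actual code) read the
         * size before the pointer:
         *
         *      hash_sz = p->p_tidhash_sz;
         *      membar_consumer();
         *      hash = p->p_tidhash;
         *      bucket = &hash[TIDHASH(tid, hash_sz)];
         *
         * Ordered this way, a reader that observes the new p_tidhash_sz
         * also observes the new (larger) p_tidhash, so it never indexes
         * past the end of the old, smaller table.
         */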
        while (p->p_lwpfree == NULL) {
                uint_t dirsz = p->p_lwpdir_sz;
                lwpdir_t *new_dir;
                uint_t new_dirsz;
                lwpdir_t *ldp;
                tidhash_t *new_hash;
                uint_t new_hashsz;

                mutex_exit(&p->p_lock);

                /*
                 * Prepare to remember the old p_tidhash for later
                 * kmem_free()ing when the process exits or execs.
                 */
                if (ret_tidhash == NULL)
                        ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t),
                            KM_SLEEP);
                if (old_dir != NULL)
                        kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
                if (old_hash != NULL)
                        kmem_free(old_hash, old_hashsz * sizeof (*old_hash));

                new_dirsz = 2 * dirsz + 2;
                new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP);
                for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++)
                        ldp->ld_next = ldp + 1;
                new_hashsz = (new_dirsz + 2) / 2;
                new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t),
                    KM_SLEEP);

                mutex_enter(&p->p_lock);
                if (p == curproc)
                        prbarrier(p);

                if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) {
                        /*
                         * Someone else beat us to it or some lwp exited.
                         * Set up to free our memory and take a lap.
                         */
                        old_dir = new_dir;
                        old_dirsz = new_dirsz;
                        old_hash = new_hash;
                        old_hashsz = new_hashsz;
                } else {
                        /*
                         * For the benefit of lwp_hash_lookup_and_lock(),
                         * called from lwp_unpark(), which searches the
                         * tid hash table without acquiring p->p_lock,
                         * we must acquire all of the tid hash table
                         * locks before replacing p->p_tidhash.
                         */
                        old_hash = p->p_tidhash;
                        old_hashsz = p->p_tidhash_sz;
                        for (i = 0; i < old_hashsz; i++) {
                                mutex_enter(&old_hash[i].th_lock);
                                mutex_enter(&new_hash[i].th_lock);
                        }

                        /*
                         * We simply hash in all of the old directory entries.
                         * This works because the old directory has no empty
                         * slots and the new hash table starts out empty.
                         * This reproduces the original directory ordering
                         * (required for /proc directory semantics).
                         */
                        old_dir = p->p_lwpdir;
                        old_dirsz = p->p_lwpdir_sz;
                        p->p_lwpdir = new_dir;
                        p->p_lwpfree = new_dir;
                        p->p_lwpdir_sz = new_dirsz;
                        for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++)
                                lwp_hash_in(p, ldp->ld_entry,
                                    new_hash, new_hashsz, 0);

                        /*
                         * Remember the old hash table along with all
                         * of the previously-remembered hash tables.
                         * We will free them at process exit or exec.
                         */
                        ret_tidhash->rth_tidhash = old_hash;
                        ret_tidhash->rth_tidhash_sz = old_hashsz;
                        ret_tidhash->rth_next = p->p_ret_tidhash;
                        p->p_ret_tidhash = ret_tidhash;

                        /*
                         * Now establish the new tid hash table.
                         * As soon as we assign p->p_tidhash,
                         * code in lwp_unpark() can start using it.
                         */
                        membar_producer();
                        p->p_tidhash = new_hash;

                        /*
                         * It is necessary that p_tidhash reach global
                         * visibility before p_tidhash_sz.  Otherwise,
                         * code in lwp_hash_lookup_and_lock() could
                         * index into the old p_tidhash using the new
                         * p_tidhash_sz and thereby access invalid data.
                         */
                        membar_producer();
                        p->p_tidhash_sz = new_hashsz;

                        /*
                         * Release the locks; allow lwp_unpark() to carry on.
                         */
                        for (i = 0; i < old_hashsz; i++) {
                                mutex_exit(&old_hash[i].th_lock);
                                mutex_exit(&new_hash[i].th_lock);
                        }

                        /*
                         * Avoid freeing these objects below.
                         */
                        ret_tidhash = NULL;
                        old_hash = NULL;
                        old_hashsz = 0;
                }
        }

        /*
         * Block the process against /proc while we manipulate p->p_tlist,
         * unless lwp_create() was called by /proc for the PCAGENT operation.
         * We want to do this early enough so that we don't drop p->p_lock
         * until the thread is put on the p->p_tlist.
         */
        if (p == curproc) {
                prbarrier(p);
                /*
                 * If the current lwp has been requested to stop, do so now.
                 * Otherwise we have a race condition between /proc attempting
                 * to stop the process and this thread creating a new lwp
                 * that was not seen when the /proc PCSTOP request was issued.
                 * We rely on stop() to call prbarrier(p) before returning.
                 */
                while ((curthread->t_proc_flag & TP_PRSTOP) &&
                    !ttolwp(curthread)->lwp_nostop) {
                        /*
                         * We called pool_barrier_enter() before calling
                         * here to lwp_create().  We have to call
                         * pool_barrier_exit() before stopping.
                         */
                        pool_barrier_exit();
                        prbarrier(p);
                        stop(PR_REQUESTED, 0);
                        /*
                         * And we have to repeat the call to
                         * pool_barrier_enter after stopping.
                         */
                        pool_barrier_enter();
                        prbarrier(p);
                }

                /*
                 * If the process is exiting, there could be a race between
                 * the agent lwp creation and the new lwp currently being
                 * created.  So, to prevent this race, lwp creation fails
                 * if the process is exiting.
                 */
                if (p->p_flag & (SEXITLWPS|SKILLED)) {
                        err = 1;
                        goto error;
                }

                /*
                 * Since we might have dropped p->p_lock, the
                 * lwp directory free list might have changed.
                 */
                if (p->p_lwpfree == NULL)
                        goto grow;
        }

        kpreempt_disable();     /* can't grab cpu_lock here */

        /*
         * Inherit processor and processor set bindings from curthread.
         *
         * For kernel LWPs, we do not inherit processor set bindings at
         * process creation time (i.e. when p != curproc).  After the
         * kernel process is created, any subsequent LWPs must be created
         * by threads in the kernel process, at which point we *will*
         * inherit processor set bindings.
         */
        if (CLASS_KERNEL(cid) && p != curproc) {
                t->t_bind_cpu = binding = PBIND_NONE;
                t->t_cpupart = oldpart = &cp_default;
                t->t_bind_pset = PS_NONE;
                t->t_bindflag = (uchar_t)default_binding_mode;
        } else {
                binding = curthread->t_bind_cpu;
                t->t_bind_cpu = binding;
                oldpart = t->t_cpupart;
                t->t_cpupart = curthread->t_cpupart;
                t->t_bind_pset = curthread->t_bind_pset;
                t->t_bindflag = curthread->t_bindflag |
                    (uchar_t)default_binding_mode;
        }

        /*
         * thread_create() initializes this thread's home lgroup to the root.
         * Choose a more suitable lgroup, since this thread is associated
         * with an lwp.
         */
        ASSERT(oldpart != NULL);
        if (binding != PBIND_NONE && t->t_affinitycnt == 0) {
                t->t_bound_cpu = cpu[binding];
                if (t->t_lpl != t->t_bound_cpu->cpu_lpl)
                        lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1);
        } else if (CLASS_KERNEL(cid)) {
                /*
                 * Kernel threads are always in the root lgrp.
                 */
                lgrp_move_thread(t,
                    &t->t_cpupart->cp_lgrploads[LGRP_ROOTID], 1);
        } else {
                lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1);
        }

        kpreempt_enable();

        /*
         * make sure lpl points to our own partition
         */
        ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads);
        ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads +
            t->t_cpupart->cp_nlgrploads);

        /*
         * It is safe to point the thread to the new project without holding it
         * since we're holding the target process' p_lock here and therefore
         * we're guaranteed that it will not move to another project.
         */
        newkpj = p->p_task->tk_proj;
        oldkpj = ttoproj(t);
        if (newkpj != oldkpj) {
                t->t_proj = newkpj;
                (void) project_hold(newkpj);
                project_rele(oldkpj);
        }

        if (cid != NOCLASS) {
                /*
                 * If the lwp is being created in the current process
                 * and matches the current thread's scheduling class,
                 * we should propagate the current thread's scheduling
                 * parameters by calling CL_FORK.  Otherwise just use
                 * the defaults by calling CL_ENTERCLASS.
                 */
                if (p != curproc || curthread->t_cid != cid) {
                        err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp);
                        t->t_pri = pri; /* CL_ENTERCLASS may have changed it */
                        /*
                         * We don't call schedctl_set_cidpri(t) here
                         * because the schedctl data is not yet set
                         * up for the newly-created lwp.
                         */
                } else {
                        t->t_clfuncs = &(sclass[cid].cl_funcs->thread);
                        err = CL_FORK(curthread, t, bufp);
                        t->t_cid = cid;
                }
                if (err) {
                        atomic_inc_32(&p->p_zone->zone_ffmisc);
                        goto error;
                } else {
                        bufp = NULL;
                }
        }

        /*
         * If we were given an lwpid then use it, else allocate one.
         */
        if (lwpid != 0)
                t->t_tid = lwpid;
        else {
                /*
                 * lwp/thread id 0 is never valid; reserved for special checks.
                 * lwp/thread id 1 is reserved for the main thread.
                 * Start again at 2 when INT_MAX has been reached
                 * (id_t is a signed 32-bit integer).
                 */
                id_t prev_id = p->p_lwpid;      /* last allocated tid */

                do {    /* avoid lwpid duplication */
                        if (p->p_lwpid == INT_MAX) {
                                p->p_flag |= SLWPWRAP;
                                p->p_lwpid = 1;
                        }
                        if ((t->t_tid = ++p->p_lwpid) == prev_id) {
                                /*
                                 * All lwpids are allocated; fail the request.
                                 */
                                err = 1;
                                atomic_inc_32(&p->p_zone->zone_ffnoproc);
                                goto error;
                        }
                        /*
                         * We only need to worry about colliding with an id
                         * that's already in use if this process has
                         * cycled through all available lwp ids.
                         */
                        if ((p->p_flag & SLWPWRAP) == 0)
                                break;
                } while (lwp_hash_lookup(p, t->t_tid) != NULL);
        }

        /*
         * If this is a branded process, let the brand do any necessary lwp
         * initialization.
         */
        if (PROC_IS_BRANDED(p)) {
                if (BROP(p)->b_initlwp(lwp)) {
                        err = 1;
                        atomic_inc_32(&p->p_zone->zone_ffmisc);
                        goto error;
                }
                branded = 1;
        }

        if (t->t_tid == 1) {
                kpreempt_disable();
                ASSERT(t->t_lpl != NULL);
                p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid;
                kpreempt_enable();
                if (p->p_tr_lgrpid != LGRP_NONE &&
                    p->p_tr_lgrpid != p->p_t1_lgrpid) {
                        lgrp_update_trthr_migrations(1);
                }
        }

        p->p_lwpcnt++;
        t->t_waitfor = -1;

        /*
         * Turn microstate accounting on for thread if on for process.
         */
        if (p->p_flag & SMSACCT)
                t->t_proc_flag |= TP_MSACCT;

        /*
         * If the process has watchpoints, mark the new thread as such.
         */
        if (pr_watch_active(p))
                watch_enable(t);

        /*
         * The lwp is being created in the stopped state.
         * We set all the necessary flags to indicate that fact here.
         * We omit the TS_CREATE flag from t_schedflag so that the lwp
         * cannot be set running until the caller is finished with it,
         * even if lwp_continue() is called on it after we drop p->p_lock.
         * When the caller is finished with the newly-created lwp,
         * the caller must call lwp_create_done() to allow the lwp
         * to be set running.  If the TP_HOLDLWP flag is left set, the
         * lwp will suspend itself after reaching system call exit.
         */
        init_mstate(t, LMS_STOPPED);
        t->t_proc_flag |= TP_HOLDLWP;
        t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE));
        t->t_whystop = PR_SUSPENDED;
        t->t_whatstop = SUSPEND_NORMAL;
        t->t_sig_check = 1;     /* ensure that TP_HOLDLWP is honored */

        /*
         * Set system call processing flags in case tracing or profiling
         * is set.  The first system call will evaluate these and turn
         * them off if they aren't needed.
         */
        t->t_pre_sys = 1;
        t->t_post_sys = 1;

        /*
         * Insert the new thread into the list of all threads.
         */
        if ((tx = p->p_tlist) == NULL) {
                t->t_back = t;
                t->t_forw = t;
                p->p_tlist = t;
        } else {
                t->t_forw = tx;
                t->t_back = tx->t_back;
                tx->t_back->t_forw = t;
                tx->t_back = t;
        }

        /*
         * Insert the new lwp into an lwp directory slot position
         * and into the lwpid hash table.
         */
        lep->le_thread = t;
        lep->le_lwpid = t->t_tid;
        lep->le_start = t->t_start;
        lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1);

        if (state == TS_RUN) {
                /*
                 * We set the new lwp running immediately.
                 */
                t->t_proc_flag &= ~TP_HOLDLWP;
                lwp_create_done(t);
        }

error:
        if (err) {
                if (CLASS_KERNEL(cid)) {
                        /*
                         * This should only happen if a system process runs
                         * out of lwpids, which shouldn't occur.
                         */
                        panic("Failed to create a system LWP");
                }
                /*
                 * We have failed to create an lwp, so decrement the number
                 * of lwps in the task and let the lgroup load averages know
                 * that this thread isn't going to show up.
                 */
                kpreempt_disable();
                lgrp_move_thread(t, NULL, 1);
                kpreempt_enable();

                ASSERT(MUTEX_HELD(&p->p_lock));
                mutex_enter(&p->p_zone->zone_nlwps_lock);
                p->p_task->tk_nlwps--;
                p->p_task->tk_proj->kpj_nlwps--;
                p->p_zone->zone_nlwps--;
                mutex_exit(&p->p_zone->zone_nlwps_lock);
                if (cid != NOCLASS && bufp != NULL)
                        CL_FREE(cid, bufp);

                if (branded)
                        BROP(p)->b_freelwp(lwp);

                mutex_exit(&p->p_lock);
                t->t_state = TS_FREE;
                thread_rele(t);

                /*
                 * We need to remove t from the list of all threads
                 * because thread_exit()/lwp_exit() isn't called on t.
                 */
                mutex_enter(&pidlock);
                ASSERT(t != t->t_next);         /* t0 never exits */
                t->t_next->t_prev = t->t_prev;
                t->t_prev->t_next = t->t_next;
                mutex_exit(&pidlock);

                thread_free(t);
                kmem_free(lep, sizeof (*lep));
                lwp = NULL;
        } else {
                mutex_exit(&p->p_lock);
        }

        if (old_dir != NULL)
                kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
        if (old_hash != NULL)
                kmem_free(old_hash, old_hashsz * sizeof (*old_hash));
        if (ret_tidhash != NULL)
                kmem_free(ret_tidhash, sizeof (ret_tidhash_t));

        DTRACE_PROC1(lwp__create, kthread_t *, t);
        return (lwp);
}

/*
 * lwp_create_done() is called by the caller of lwp_create() to set the
 * newly-created lwp running after the caller has finished manipulating it.
 */
void
lwp_create_done(kthread_t *t)
{
        proc_t *p = ttoproc(t);

        ASSERT(MUTEX_HELD(&p->p_lock));

        /*
         * We set the TS_CREATE and TS_CSTART flags and call setrun_locked().
         * (The absence of the TS_CREATE flag prevents the lwp from running
         * until we are finished with it, even if lwp_continue() is called on
         * it by some other lwp in the process or elsewhere in the kernel.)
         */
        thread_lock(t);
        ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE));
        /*
         * If TS_CSTART is set, lwp_continue(t) has been called and
         * has already incremented p_lwprcnt; avoid doing this twice.
         */
        if (!(t->t_schedflag & TS_CSTART))
                p->p_lwprcnt++;
        t->t_schedflag |= (TS_CSTART | TS_CREATE);
        setrun_locked(t);
        thread_unlock(t);
}
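
/*
 * Illustrative caller protocol for lwp_create()/lwp_create_done() (schematic
 * only, not copied from an actual caller; finish_setup() is a made-up name):
 *
 *      lwp = lwp_create(start_func, arg, len, p, TS_STOPPED, pri,
 *          &curthread->t_hold, cid, 0);
 *      if (lwp == NULL)
 *              return (EAGAIN);
 *      finish_setup(lwp);              caller-specific initialization
 *      mutex_enter(&p->p_lock);
 *      lwp_create_done(lwptot(lwp));
 *      mutex_exit(&p->p_lock);
 *
 * The new lwp cannot be set running, even via lwp_continue(), until
 * lwp_create_done() is called; passing state == TS_RUN makes lwp_create()
 * call lwp_create_done() itself.
 */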
8237c478bd9Sstevel@tonic-gate */ 8247c478bd9Sstevel@tonic-gate void 8257c478bd9Sstevel@tonic-gate lwp_ctmpl_copy(klwp_t *dst, klwp_t *src) 8267c478bd9Sstevel@tonic-gate { 8277c478bd9Sstevel@tonic-gate int i; 8287c478bd9Sstevel@tonic-gate 8297c478bd9Sstevel@tonic-gate for (i = 0; i < ct_ntypes; i++) { 8307c478bd9Sstevel@tonic-gate dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]); 8317c478bd9Sstevel@tonic-gate dst->lwp_ct_latest[i] = NULL; 8327c478bd9Sstevel@tonic-gate } 8337c478bd9Sstevel@tonic-gate } 8347c478bd9Sstevel@tonic-gate 8357c478bd9Sstevel@tonic-gate /* 8367c478bd9Sstevel@tonic-gate * Clear an LWP's contract template state. 8377c478bd9Sstevel@tonic-gate */ 8387c478bd9Sstevel@tonic-gate void 8397c478bd9Sstevel@tonic-gate lwp_ctmpl_clear(klwp_t *lwp) 8407c478bd9Sstevel@tonic-gate { 8417c478bd9Sstevel@tonic-gate ct_template_t *tmpl; 8427c478bd9Sstevel@tonic-gate int i; 8437c478bd9Sstevel@tonic-gate 8447c478bd9Sstevel@tonic-gate for (i = 0; i < ct_ntypes; i++) { 8457c478bd9Sstevel@tonic-gate if ((tmpl = lwp->lwp_ct_active[i]) != NULL) { 8467c478bd9Sstevel@tonic-gate ctmpl_free(tmpl); 8477c478bd9Sstevel@tonic-gate lwp->lwp_ct_active[i] = NULL; 8487c478bd9Sstevel@tonic-gate } 8497c478bd9Sstevel@tonic-gate 8507c478bd9Sstevel@tonic-gate if (lwp->lwp_ct_latest[i] != NULL) { 8517c478bd9Sstevel@tonic-gate contract_rele(lwp->lwp_ct_latest[i]); 8527c478bd9Sstevel@tonic-gate lwp->lwp_ct_latest[i] = NULL; 8537c478bd9Sstevel@tonic-gate } 8547c478bd9Sstevel@tonic-gate } 8557c478bd9Sstevel@tonic-gate } 8567c478bd9Sstevel@tonic-gate 8577c478bd9Sstevel@tonic-gate /* 8587c478bd9Sstevel@tonic-gate * Individual lwp exit. 8597c478bd9Sstevel@tonic-gate * If this is the last lwp, exit the whole process. 8607c478bd9Sstevel@tonic-gate */ 8617c478bd9Sstevel@tonic-gate void 8627c478bd9Sstevel@tonic-gate lwp_exit(void) 8637c478bd9Sstevel@tonic-gate { 8647c478bd9Sstevel@tonic-gate kthread_t *t = curthread; 8657c478bd9Sstevel@tonic-gate klwp_t *lwp = ttolwp(t); 8667c478bd9Sstevel@tonic-gate proc_t *p = ttoproc(t); 8677c478bd9Sstevel@tonic-gate 8687c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 8697c478bd9Sstevel@tonic-gate 8707c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 8717c478bd9Sstevel@tonic-gate 8727c478bd9Sstevel@tonic-gate #if defined(__sparc) 8737c478bd9Sstevel@tonic-gate /* 8747c478bd9Sstevel@tonic-gate * Ensure that the user stack is fully abandoned.. 
         */
        trash_user_windows();
#endif

        tsd_exit();             /* free thread specific data */

        kcpc_passivate();       /* Clean up performance counter state */

        pollcleanup();

        if (t->t_door)
                door_slam();

        if (t->t_schedctl != NULL)
                schedctl_lwp_cleanup(t);

        if (t->t_upimutex != NULL)
                upimutex_cleanup();

        /*
         * Perform any brand specific exit processing, then release any
         * brand data associated with the lwp.
         */
        if (PROC_IS_BRANDED(p))
                BROP(p)->b_lwpexit(lwp);

        lwp_pcb_exit();

        mutex_enter(&p->p_lock);
        lwp_cleanup();

        /*
         * When this process is dumping core, its lwps are held here
         * until the core dump is finished.  Then exitlwps() is called
         * again to release these lwps so that they can finish exiting.
         */
        if (p->p_flag & SCOREDUMP)
                stop(PR_SUSPENDED, SUSPEND_NORMAL);

        /*
         * Block the process against /proc now that we have really acquired
         * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least).
         */
        prbarrier(p);

        /*
         * Call proc_exit() if this is the last non-daemon lwp in the process.
         */
        if (!(t->t_proc_flag & TP_DAEMON) &&
            p->p_lwpcnt == p->p_lwpdaemon + 1) {
                mutex_exit(&p->p_lock);
                if (proc_exit(CLD_EXITED, 0) == 0) {
                        /* Restarting init. */
                        return;
                }

                /*
                 * proc_exit() returns a non-zero value when some other
                 * lwp got there first.  We just have to continue in
                 * lwp_exit().
                 */
                mutex_enter(&p->p_lock);
                ASSERT(curproc->p_flag & SEXITLWPS);
                prbarrier(p);
        }

        DTRACE_PROC(lwp__exit);

        /*
         * If the lwp is a detached lwp or if the process is exiting,
         * remove (lwp_hash_out()) the lwp from the lwp directory.
         * Otherwise null out the lwp's le_thread pointer in the lwp
         * directory so that other threads will see it as a zombie lwp.
         */
        prlwpexit(t);           /* notify /proc */
        if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS))
                lwp_hash_out(p, t->t_tid);
        else {
                ASSERT(!(t->t_proc_flag & TP_DAEMON));
                p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL;
                p->p_zombcnt++;
                cv_broadcast(&p->p_lwpexit);
        }
        if (t->t_proc_flag & TP_DAEMON) {
                p->p_lwpdaemon--;
                t->t_proc_flag &= ~TP_DAEMON;
        }
        t->t_proc_flag &= ~TP_TWAIT;

        /*
         * Maintain accurate lwp count for task.max-lwps resource control.
         */
        mutex_enter(&p->p_zone->zone_nlwps_lock);
        p->p_task->tk_nlwps--;
        p->p_task->tk_proj->kpj_nlwps--;
        p->p_zone->zone_nlwps--;
        mutex_exit(&p->p_zone->zone_nlwps_lock);

        CL_EXIT(t);             /* tell the scheduler that t is exiting */
        ASSERT(p->p_lwpcnt != 0);
        p->p_lwpcnt--;

        /*
         * If all remaining non-daemon lwps are waiting in lwp_wait(),
         * wake them up so someone can return EDEADLK.
         * (See the block comment preceding lwp_wait().)
         */
        if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait))
                cv_broadcast(&p->p_lwpexit);

        t->t_proc_flag |= TP_LWPEXIT;
        term_mstate(t);

#ifndef NPROBE
        /* Kernel probe */
        if (t->t_tnf_tpdp)
                tnf_thread_exit();
#endif /* NPROBE */

        t->t_forw->t_back = t->t_back;
        t->t_back->t_forw = t->t_forw;
        if (t == p->p_tlist)
                p->p_tlist = t->t_forw;

        /*
         * Clean up the signal state.
         */
        if (t->t_sigqueue != NULL)
                sigdelq(p, t, 0);
        if (lwp->lwp_curinfo != NULL) {
                siginfofree(lwp->lwp_curinfo);
                lwp->lwp_curinfo = NULL;
        }

        /*
         * If we have spymaster information (that is, if we're an agent LWP),
         * free that now.
         */
        if (lwp->lwp_spymaster != NULL) {
                kmem_free(lwp->lwp_spymaster, sizeof (psinfo_t));
                lwp->lwp_spymaster = NULL;
        }

        thread_rele(t);

        /*
         * Terminated lwps are associated with process zero and are put onto
         * death-row by resume().  Avoid preemption after resetting t->t_procp.
         */
        t->t_preempt++;

        if (t->t_ctx != NULL)
                exitctx(t);
        if (p->p_pctx != NULL)
                exitpctx(p);

        t->t_procp = &p0;

        /*
         * Notify the HAT about the change of address space
         */
        hat_thread_exit(t);
        /*
         * When this is the last running lwp in this process and some lwp is
         * waiting for this condition to become true, or this thread was being
         * suspended, then the waiting lwp is awakened.
         *
         * Also, if the process is exiting, we may have a thread waiting in
         * exitlwps() that needs to be notified.
         */
        if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) ||
            (p->p_flag & SEXITLWPS))
                cv_broadcast(&p->p_holdlwps);

        /*
         * Need to drop p_lock so we can reacquire pidlock.
         */
        mutex_exit(&p->p_lock);
        mutex_enter(&pidlock);

        ASSERT(t != t->t_next);         /* t0 never exits */
        t->t_next->t_prev = t->t_prev;
        t->t_prev->t_next = t->t_next;
        cv_broadcast(&t->t_joincv);     /* wake up anyone in thread_join */
        mutex_exit(&pidlock);

        t->t_state = TS_ZOMB;
        swtch_from_zombie();
        /* never returns */
}


/*
 * Cleanup function for an exiting lwp.
10697c478bd9Sstevel@tonic-gate * Called both from lwp_exit() and from proc_exit(). 10707c478bd9Sstevel@tonic-gate * p->p_lock is repeatedly released and grabbed in this function. 10717c478bd9Sstevel@tonic-gate */ 10727c478bd9Sstevel@tonic-gate void 10737c478bd9Sstevel@tonic-gate lwp_cleanup(void) 10747c478bd9Sstevel@tonic-gate { 10757c478bd9Sstevel@tonic-gate kthread_t *t = curthread; 10767c478bd9Sstevel@tonic-gate proc_t *p = ttoproc(t); 10777c478bd9Sstevel@tonic-gate 10787c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 10797c478bd9Sstevel@tonic-gate 10807c478bd9Sstevel@tonic-gate /* untimeout any lwp-bound realtime timers */ 10817c478bd9Sstevel@tonic-gate if (p->p_itimer != NULL) 10827c478bd9Sstevel@tonic-gate timer_lwpexit(); 10837c478bd9Sstevel@tonic-gate 10847c478bd9Sstevel@tonic-gate /* 10857c478bd9Sstevel@tonic-gate * If this is the /proc agent lwp that is exiting, readjust p_lwpid 10867c478bd9Sstevel@tonic-gate * so it appears that the agent never existed, and clear p_agenttp. 10877c478bd9Sstevel@tonic-gate */ 10887c478bd9Sstevel@tonic-gate if (t == p->p_agenttp) { 10897c478bd9Sstevel@tonic-gate ASSERT(t->t_tid == p->p_lwpid); 10907c478bd9Sstevel@tonic-gate p->p_lwpid--; 10917c478bd9Sstevel@tonic-gate p->p_agenttp = NULL; 10927c478bd9Sstevel@tonic-gate } 10937c478bd9Sstevel@tonic-gate 10947c478bd9Sstevel@tonic-gate /* 10957c478bd9Sstevel@tonic-gate * Do lgroup bookkeeping to account for thread exiting. 10967c478bd9Sstevel@tonic-gate */ 10977c478bd9Sstevel@tonic-gate kpreempt_disable(); 10987c478bd9Sstevel@tonic-gate lgrp_move_thread(t, NULL, 1); 10992cb27123Saguzovsk if (t->t_tid == 1) { 11002cb27123Saguzovsk p->p_t1_lgrpid = LGRP_NONE; 11012cb27123Saguzovsk } 11027c478bd9Sstevel@tonic-gate kpreempt_enable(); 11037c478bd9Sstevel@tonic-gate 11047c478bd9Sstevel@tonic-gate lwp_ctmpl_clear(ttolwp(t)); 11057c478bd9Sstevel@tonic-gate } 11067c478bd9Sstevel@tonic-gate 11077c478bd9Sstevel@tonic-gate int 11087c478bd9Sstevel@tonic-gate lwp_suspend(kthread_t *t) 11097c478bd9Sstevel@tonic-gate { 11107c478bd9Sstevel@tonic-gate int tid; 11117c478bd9Sstevel@tonic-gate proc_t *p = ttoproc(t); 11127c478bd9Sstevel@tonic-gate 11137c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 11147c478bd9Sstevel@tonic-gate 11157c478bd9Sstevel@tonic-gate /* 11167c478bd9Sstevel@tonic-gate * Set the thread's TP_HOLDLWP flag so it will stop in holdlwp(). 11177c478bd9Sstevel@tonic-gate * If an lwp is stopping itself, there is no need to wait. 11187c478bd9Sstevel@tonic-gate */ 11198132eb48Sraf top: 11207c478bd9Sstevel@tonic-gate t->t_proc_flag |= TP_HOLDLWP; 11217c478bd9Sstevel@tonic-gate if (t == curthread) { 11227c478bd9Sstevel@tonic-gate t->t_sig_check = 1; 11237c478bd9Sstevel@tonic-gate } else { 11247c478bd9Sstevel@tonic-gate /* 11257c478bd9Sstevel@tonic-gate * Make sure the lwp stops promptly. 11267c478bd9Sstevel@tonic-gate */ 11277c478bd9Sstevel@tonic-gate thread_lock(t); 11287c478bd9Sstevel@tonic-gate t->t_sig_check = 1; 11297c478bd9Sstevel@tonic-gate /* 11307c478bd9Sstevel@tonic-gate * XXX Should use virtual stop like /proc does instead of 11317c478bd9Sstevel@tonic-gate * XXX waking the thread to get it to stop. 
11327c478bd9Sstevel@tonic-gate */ 1133c97ad5cdSakolb if (ISWAKEABLE(t) || ISWAITING(t)) { 11347c478bd9Sstevel@tonic-gate setrun_locked(t); 1135c97ad5cdSakolb } else if (t->t_state == TS_ONPROC && t->t_cpu != CPU) { 11367c478bd9Sstevel@tonic-gate poke_cpu(t->t_cpu->cpu_id); 1137c97ad5cdSakolb } 1138c97ad5cdSakolb 11397c478bd9Sstevel@tonic-gate tid = t->t_tid; /* remember thread ID */ 11407c478bd9Sstevel@tonic-gate /* 11417c478bd9Sstevel@tonic-gate * Wait for lwp to stop 11427c478bd9Sstevel@tonic-gate */ 11437c478bd9Sstevel@tonic-gate while (!SUSPENDED(t)) { 11447c478bd9Sstevel@tonic-gate /* 11457c478bd9Sstevel@tonic-gate * Drop the thread lock before waiting and reacquire it 11467c478bd9Sstevel@tonic-gate * afterwards, so the thread can change its t_state 11477c478bd9Sstevel@tonic-gate * field. 11487c478bd9Sstevel@tonic-gate */ 11497c478bd9Sstevel@tonic-gate thread_unlock(t); 11507c478bd9Sstevel@tonic-gate 11517c478bd9Sstevel@tonic-gate /* 11527c478bd9Sstevel@tonic-gate * Check if aborted by exitlwps(). 11537c478bd9Sstevel@tonic-gate */ 11547c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) 11557c478bd9Sstevel@tonic-gate lwp_exit(); 11567c478bd9Sstevel@tonic-gate 11577c478bd9Sstevel@tonic-gate /* 11587c478bd9Sstevel@tonic-gate * Cooperate with jobcontrol signals and /proc stopping 11597c478bd9Sstevel@tonic-gate * by calling cv_wait_sig() to wait for the target 11607c478bd9Sstevel@tonic-gate * lwp to stop. Just using cv_wait() can lead to 11617c478bd9Sstevel@tonic-gate * deadlock because, if some other lwp has stopped 11627c478bd9Sstevel@tonic-gate * by either of these mechanisms, then p_lwprcnt will 11637c478bd9Sstevel@tonic-gate * never become zero if we do a cv_wait(). 11647c478bd9Sstevel@tonic-gate */ 11657c478bd9Sstevel@tonic-gate if (!cv_wait_sig(&p->p_holdlwps, &p->p_lock)) 11667c478bd9Sstevel@tonic-gate return (EINTR); 11677c478bd9Sstevel@tonic-gate 11687c478bd9Sstevel@tonic-gate /* 11697c478bd9Sstevel@tonic-gate * Check to see if thread died while we were 11707c478bd9Sstevel@tonic-gate * waiting for it to suspend. 11717c478bd9Sstevel@tonic-gate */ 11727c478bd9Sstevel@tonic-gate if (idtot(p, tid) == NULL) 11737c478bd9Sstevel@tonic-gate return (ESRCH); 11747c478bd9Sstevel@tonic-gate 11757c478bd9Sstevel@tonic-gate thread_lock(t); 11767c478bd9Sstevel@tonic-gate /* 11778132eb48Sraf * If the TP_HOLDLWP flag went away, lwp_continue() 11788132eb48Sraf * or vfork() must have been called while we were 11798132eb48Sraf * waiting, so start over again. 11807c478bd9Sstevel@tonic-gate */ 11817c478bd9Sstevel@tonic-gate if ((t->t_proc_flag & TP_HOLDLWP) == 0) { 11827c478bd9Sstevel@tonic-gate thread_unlock(t); 11838132eb48Sraf goto top; 11847c478bd9Sstevel@tonic-gate } 11857c478bd9Sstevel@tonic-gate } 11867c478bd9Sstevel@tonic-gate thread_unlock(t); 11877c478bd9Sstevel@tonic-gate } 11887c478bd9Sstevel@tonic-gate return (0); 11897c478bd9Sstevel@tonic-gate } 11907c478bd9Sstevel@tonic-gate 11917c478bd9Sstevel@tonic-gate /* 11927c478bd9Sstevel@tonic-gate * continue a lwp that's been stopped by lwp_suspend(). 
11937c478bd9Sstevel@tonic-gate */ 11947c478bd9Sstevel@tonic-gate void 11957c478bd9Sstevel@tonic-gate lwp_continue(kthread_t *t) 11967c478bd9Sstevel@tonic-gate { 11977c478bd9Sstevel@tonic-gate proc_t *p = ttoproc(t); 11987c478bd9Sstevel@tonic-gate int was_suspended = t->t_proc_flag & TP_HOLDLWP; 11997c478bd9Sstevel@tonic-gate 12007c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 12017c478bd9Sstevel@tonic-gate 12027c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_HOLDLWP; 12037c478bd9Sstevel@tonic-gate thread_lock(t); 12047c478bd9Sstevel@tonic-gate if (SUSPENDED(t) && 12057c478bd9Sstevel@tonic-gate !(p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH))) { 12067c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 12077c478bd9Sstevel@tonic-gate t->t_schedflag |= TS_CSTART; 12087c478bd9Sstevel@tonic-gate setrun_locked(t); 12097c478bd9Sstevel@tonic-gate } 12107c478bd9Sstevel@tonic-gate thread_unlock(t); 12117c478bd9Sstevel@tonic-gate /* 12127c478bd9Sstevel@tonic-gate * Wakeup anyone waiting for this thread to be suspended 12137c478bd9Sstevel@tonic-gate */ 12147c478bd9Sstevel@tonic-gate if (was_suspended) 12157c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 12167c478bd9Sstevel@tonic-gate } 12177c478bd9Sstevel@tonic-gate 12187c478bd9Sstevel@tonic-gate /* 12197c478bd9Sstevel@tonic-gate * ******************************** 12207c478bd9Sstevel@tonic-gate * Miscellaneous lwp routines * 12217c478bd9Sstevel@tonic-gate * ******************************** 12227c478bd9Sstevel@tonic-gate */ 12237c478bd9Sstevel@tonic-gate /* 12247c478bd9Sstevel@tonic-gate * When a process is undergoing a forkall(), its p_flag is set to SHOLDFORK. 12257c478bd9Sstevel@tonic-gate * This will cause the process's lwps to stop at a hold point. A hold 12267c478bd9Sstevel@tonic-gate * point is where a kernel thread has a flat stack. This is at the 12277c478bd9Sstevel@tonic-gate * return from a system call and at the return from a user level trap. 12287c478bd9Sstevel@tonic-gate * 12297c478bd9Sstevel@tonic-gate * When a process is undergoing a fork1() or vfork(), its p_flag is set to 12307c478bd9Sstevel@tonic-gate * SHOLDFORK1. This will cause the process's lwps to stop at a modified 12317c478bd9Sstevel@tonic-gate * hold point. The lwps in the process are not being cloned, so they 12327c478bd9Sstevel@tonic-gate * are held at the usual hold points and also within issig_forreal(). 12337c478bd9Sstevel@tonic-gate * This has the side-effect that their system calls do not return 12347c478bd9Sstevel@tonic-gate * showing EINTR. 12357c478bd9Sstevel@tonic-gate * 12367c478bd9Sstevel@tonic-gate * An lwp can also be held. This is identified by the TP_HOLDLWP flag on 12377c478bd9Sstevel@tonic-gate * the thread. The TP_HOLDLWP flag is set in lwp_suspend(), where the active 12387c478bd9Sstevel@tonic-gate * lwp is waiting for the target lwp to be stopped. 12397c478bd9Sstevel@tonic-gate */ 12407c478bd9Sstevel@tonic-gate void 12417c478bd9Sstevel@tonic-gate holdlwp(void) 12427c478bd9Sstevel@tonic-gate { 12437c478bd9Sstevel@tonic-gate proc_t *p = curproc; 12447c478bd9Sstevel@tonic-gate kthread_t *t = curthread; 12457c478bd9Sstevel@tonic-gate 12467c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 12477c478bd9Sstevel@tonic-gate /* 12487c478bd9Sstevel@tonic-gate * Don't terminate immediately if the process is dumping core. 12497c478bd9Sstevel@tonic-gate * Once the process has dumped core, all lwps are terminated. 
12507c478bd9Sstevel@tonic-gate */ 12517c478bd9Sstevel@tonic-gate if (!(p->p_flag & SCOREDUMP)) { 12527c478bd9Sstevel@tonic-gate if ((p->p_flag & SEXITLWPS) || (t->t_proc_flag & TP_EXITLWP)) 12537c478bd9Sstevel@tonic-gate lwp_exit(); 12547c478bd9Sstevel@tonic-gate } 12557c478bd9Sstevel@tonic-gate if (!(ISHOLD(p)) && !(p->p_flag & (SHOLDFORK1 | SHOLDWATCH))) { 12567c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 12577c478bd9Sstevel@tonic-gate return; 12587c478bd9Sstevel@tonic-gate } 12597c478bd9Sstevel@tonic-gate /* 12607c478bd9Sstevel@tonic-gate * stop() decrements p->p_lwprcnt and cv_signal()s &p->p_holdlwps 12617c478bd9Sstevel@tonic-gate * when p->p_lwprcnt becomes zero. 12627c478bd9Sstevel@tonic-gate */ 12637c478bd9Sstevel@tonic-gate stop(PR_SUSPENDED, SUSPEND_NORMAL); 12647c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) 12657c478bd9Sstevel@tonic-gate lwp_exit(); 12667c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 12677c478bd9Sstevel@tonic-gate } 12687c478bd9Sstevel@tonic-gate 12697c478bd9Sstevel@tonic-gate /* 12707c478bd9Sstevel@tonic-gate * Have all lwps within the process hold at a point where they are 12717c478bd9Sstevel@tonic-gate * cloneable (SHOLDFORK) or just safe w.r.t. fork1 (SHOLDFORK1). 12727c478bd9Sstevel@tonic-gate */ 12737c478bd9Sstevel@tonic-gate int 12747c478bd9Sstevel@tonic-gate holdlwps(int holdflag) 12757c478bd9Sstevel@tonic-gate { 12767c478bd9Sstevel@tonic-gate proc_t *p = curproc; 12777c478bd9Sstevel@tonic-gate 12787c478bd9Sstevel@tonic-gate ASSERT(holdflag == SHOLDFORK || holdflag == SHOLDFORK1); 12797c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 12807c478bd9Sstevel@tonic-gate schedctl_finish_sigblock(curthread); 12817c478bd9Sstevel@tonic-gate again: 12827c478bd9Sstevel@tonic-gate while (p->p_flag & (SEXITLWPS | SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) { 12837c478bd9Sstevel@tonic-gate /* 12847c478bd9Sstevel@tonic-gate * If another lwp is doing a forkall() or proc_exit(), bail out. 12857c478bd9Sstevel@tonic-gate */ 12867c478bd9Sstevel@tonic-gate if (p->p_flag & (SEXITLWPS | SHOLDFORK)) { 12877c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 12887c478bd9Sstevel@tonic-gate return (0); 12897c478bd9Sstevel@tonic-gate } 12907c478bd9Sstevel@tonic-gate /* 12917c478bd9Sstevel@tonic-gate * Another lwp is doing a fork1() or is undergoing 12927c478bd9Sstevel@tonic-gate * watchpoint activity. We hold here for it to complete. 12937c478bd9Sstevel@tonic-gate */ 12947c478bd9Sstevel@tonic-gate stop(PR_SUSPENDED, SUSPEND_NORMAL); 12957c478bd9Sstevel@tonic-gate } 12967c478bd9Sstevel@tonic-gate p->p_flag |= holdflag; 12977c478bd9Sstevel@tonic-gate pokelwps(p); 12987c478bd9Sstevel@tonic-gate --p->p_lwprcnt; 12997c478bd9Sstevel@tonic-gate /* 13007c478bd9Sstevel@tonic-gate * Wait for the process to become quiescent (p->p_lwprcnt == 0). 13017c478bd9Sstevel@tonic-gate */ 13027c478bd9Sstevel@tonic-gate while (p->p_lwprcnt > 0) { 13037c478bd9Sstevel@tonic-gate /* 13047c478bd9Sstevel@tonic-gate * Check if aborted by exitlwps(). 13057c478bd9Sstevel@tonic-gate * Also check if SHOLDWATCH is set; it takes precedence. 13067c478bd9Sstevel@tonic-gate */ 13077c478bd9Sstevel@tonic-gate if (p->p_flag & (SEXITLWPS | SHOLDWATCH)) { 13087c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 13097c478bd9Sstevel@tonic-gate p->p_flag &= ~holdflag; 13107c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 13117c478bd9Sstevel@tonic-gate goto again; 13127c478bd9Sstevel@tonic-gate } 13137c478bd9Sstevel@tonic-gate /* 13147c478bd9Sstevel@tonic-gate * Cooperate with jobcontrol signals and /proc stopping. 
13157c478bd9Sstevel@tonic-gate * If some other lwp has stopped by either of these 13167c478bd9Sstevel@tonic-gate * mechanisms, then p_lwprcnt will never become zero 13177c478bd9Sstevel@tonic-gate * and the process will appear deadlocked unless we 13187c478bd9Sstevel@tonic-gate * stop here in sympathy with the other lwp before 13197c478bd9Sstevel@tonic-gate * doing the cv_wait() below. 13207c478bd9Sstevel@tonic-gate * 13217c478bd9Sstevel@tonic-gate * If the other lwp stops after we do the cv_wait(), it 13227c478bd9Sstevel@tonic-gate * will wake us up to loop around and do the sympathy stop. 13237c478bd9Sstevel@tonic-gate * 13247c478bd9Sstevel@tonic-gate * Since stop() drops p->p_lock, we must start from 13257c478bd9Sstevel@tonic-gate * the top again on returning from stop(). 13267c478bd9Sstevel@tonic-gate */ 13277c478bd9Sstevel@tonic-gate if (p->p_stopsig | (curthread->t_proc_flag & TP_PRSTOP)) { 13287c478bd9Sstevel@tonic-gate int whystop = p->p_stopsig? PR_JOBCONTROL : 13297c478bd9Sstevel@tonic-gate PR_REQUESTED; 13307c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 13317c478bd9Sstevel@tonic-gate p->p_flag &= ~holdflag; 13327c478bd9Sstevel@tonic-gate stop(whystop, p->p_stopsig); 13337c478bd9Sstevel@tonic-gate goto again; 13347c478bd9Sstevel@tonic-gate } 13357c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 13367c478bd9Sstevel@tonic-gate } 13377c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 13387c478bd9Sstevel@tonic-gate p->p_flag &= ~holdflag; 13397c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 13407c478bd9Sstevel@tonic-gate return (1); 13417c478bd9Sstevel@tonic-gate } 13427c478bd9Sstevel@tonic-gate 13437c478bd9Sstevel@tonic-gate /* 13447c478bd9Sstevel@tonic-gate * See comments for holdwatch(), below. 13457c478bd9Sstevel@tonic-gate */ 13467c478bd9Sstevel@tonic-gate static int 13477c478bd9Sstevel@tonic-gate holdcheck(int clearflags) 13487c478bd9Sstevel@tonic-gate { 13497c478bd9Sstevel@tonic-gate proc_t *p = curproc; 13507c478bd9Sstevel@tonic-gate 13517c478bd9Sstevel@tonic-gate /* 13527c478bd9Sstevel@tonic-gate * If we are trying to exit, that takes precedence over anything else. 13537c478bd9Sstevel@tonic-gate */ 13547c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) { 13557c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 13567c478bd9Sstevel@tonic-gate p->p_flag &= ~clearflags; 13577c478bd9Sstevel@tonic-gate lwp_exit(); 13587c478bd9Sstevel@tonic-gate } 13597c478bd9Sstevel@tonic-gate 13607c478bd9Sstevel@tonic-gate /* 13617c478bd9Sstevel@tonic-gate * If another thread is calling fork1(), stop the current thread so the 13627c478bd9Sstevel@tonic-gate * other can complete. 13637c478bd9Sstevel@tonic-gate */ 13647c478bd9Sstevel@tonic-gate if (p->p_flag & SHOLDFORK1) { 13657c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 13667c478bd9Sstevel@tonic-gate stop(PR_SUSPENDED, SUSPEND_NORMAL); 13677c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) { 13687c478bd9Sstevel@tonic-gate p->p_flag &= ~clearflags; 13697c478bd9Sstevel@tonic-gate lwp_exit(); 13707c478bd9Sstevel@tonic-gate } 13717c478bd9Sstevel@tonic-gate return (-1); 13727c478bd9Sstevel@tonic-gate } 13737c478bd9Sstevel@tonic-gate 13747c478bd9Sstevel@tonic-gate /* 13757c478bd9Sstevel@tonic-gate * If another thread is calling fork(), then indicate we are doing 13767c478bd9Sstevel@tonic-gate * watchpoint activity. This will cause holdlwps() above to stop the 13777c478bd9Sstevel@tonic-gate * forking thread, at which point we can continue with watchpoint 13787c478bd9Sstevel@tonic-gate * activity. 
13797c478bd9Sstevel@tonic-gate */ 13807c478bd9Sstevel@tonic-gate if (p->p_flag & SHOLDFORK) { 13817c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 13827c478bd9Sstevel@tonic-gate while (p->p_flag & SHOLDFORK) { 13837c478bd9Sstevel@tonic-gate p->p_flag |= SHOLDWATCH; 13847c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 13857c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 13867c478bd9Sstevel@tonic-gate p->p_flag &= ~SHOLDWATCH; 13877c478bd9Sstevel@tonic-gate } 13887c478bd9Sstevel@tonic-gate return (-1); 13897c478bd9Sstevel@tonic-gate } 13907c478bd9Sstevel@tonic-gate 13917c478bd9Sstevel@tonic-gate return (0); 13927c478bd9Sstevel@tonic-gate } 13937c478bd9Sstevel@tonic-gate 13947c478bd9Sstevel@tonic-gate /* 13957c478bd9Sstevel@tonic-gate * Stop all lwps within the process, holding themselves in the kernel while the 13967c478bd9Sstevel@tonic-gate * active lwp undergoes watchpoint activity. This is more complicated than 13977c478bd9Sstevel@tonic-gate * expected because stop() relies on calling holdwatch() in order to copyin data 13987c478bd9Sstevel@tonic-gate * from the user's address space. A double barrier is used to prevent an 13997c478bd9Sstevel@tonic-gate * infinite loop. 14007c478bd9Sstevel@tonic-gate * 14017c478bd9Sstevel@tonic-gate * o The first thread into holdwatch() is the 'master' thread and does 14027c478bd9Sstevel@tonic-gate * the following: 14037c478bd9Sstevel@tonic-gate * 14047c478bd9Sstevel@tonic-gate * - Sets SHOLDWATCH on the current process 14057c478bd9Sstevel@tonic-gate * - Sets TP_WATCHSTOP on the current thread 14067c478bd9Sstevel@tonic-gate * - Waits for all threads to be either stopped or have 14077c478bd9Sstevel@tonic-gate * TP_WATCHSTOP set. 14087c478bd9Sstevel@tonic-gate * - Sets the SWATCHOK flag on the process 14097c478bd9Sstevel@tonic-gate * - Unsets TP_WATCHSTOP 14107c478bd9Sstevel@tonic-gate * - Waits for the other threads to completely stop 14117c478bd9Sstevel@tonic-gate * - Unsets SWATCHOK 14127c478bd9Sstevel@tonic-gate * 14137c478bd9Sstevel@tonic-gate * o If SHOLDWATCH is already set when we enter this function, then another 14147c478bd9Sstevel@tonic-gate * thread is already trying to stop this thread. This 'slave' thread 14157c478bd9Sstevel@tonic-gate * does the following: 14167c478bd9Sstevel@tonic-gate * 14177c478bd9Sstevel@tonic-gate * - Sets TP_WATCHSTOP on the current thread 14187c478bd9Sstevel@tonic-gate * - Waits for SWATCHOK flag to be set 14197c478bd9Sstevel@tonic-gate * - Calls stop() 14207c478bd9Sstevel@tonic-gate * 14217c478bd9Sstevel@tonic-gate * o If SWATCHOK is set on the process, then this function immediately 14227c478bd9Sstevel@tonic-gate * returns, as we must have been called via stop(). 14237c478bd9Sstevel@tonic-gate * 14247c478bd9Sstevel@tonic-gate * In addition, there are other flags that take precedence over SHOLDWATCH: 14257c478bd9Sstevel@tonic-gate * 14267c478bd9Sstevel@tonic-gate * o If SEXITLWPS is set, exit immediately. 14277c478bd9Sstevel@tonic-gate * 14287c478bd9Sstevel@tonic-gate * o If SHOLDFORK1 is set, wait for fork1() to complete. 14297c478bd9Sstevel@tonic-gate * 14307c478bd9Sstevel@tonic-gate * o If SHOLDFORK is set, then watchpoint activity takes precedence. In this 14317c478bd9Sstevel@tonic-gate * case, set SHOLDWATCH, signalling the forking thread to stop first. 14327c478bd9Sstevel@tonic-gate * 14337c478bd9Sstevel@tonic-gate * o If the process is being stopped via /proc (TP_PRSTOP is set), then we 14347c478bd9Sstevel@tonic-gate * stop the current thread.
14357c478bd9Sstevel@tonic-gate * 14367c478bd9Sstevel@tonic-gate * Returns 0 if all threads have been quiesced. Returns non-zero if not all 14377c478bd9Sstevel@tonic-gate * threads were stopped, or the list of watched pages has changed. 14387c478bd9Sstevel@tonic-gate */ 14397c478bd9Sstevel@tonic-gate int 14407c478bd9Sstevel@tonic-gate holdwatch(void) 14417c478bd9Sstevel@tonic-gate { 14427c478bd9Sstevel@tonic-gate proc_t *p = curproc; 14437c478bd9Sstevel@tonic-gate kthread_t *t = curthread; 14447c478bd9Sstevel@tonic-gate int ret = 0; 14457c478bd9Sstevel@tonic-gate 14467c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 14477c478bd9Sstevel@tonic-gate 14487c478bd9Sstevel@tonic-gate p->p_lwprcnt--; 14497c478bd9Sstevel@tonic-gate 14507c478bd9Sstevel@tonic-gate /* 14517c478bd9Sstevel@tonic-gate * Check for bail-out conditions as outlined above. 14527c478bd9Sstevel@tonic-gate */ 14537c478bd9Sstevel@tonic-gate if (holdcheck(0) != 0) { 14547c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 14557c478bd9Sstevel@tonic-gate return (-1); 14567c478bd9Sstevel@tonic-gate } 14577c478bd9Sstevel@tonic-gate 14587c478bd9Sstevel@tonic-gate if (!(p->p_flag & SHOLDWATCH)) { 14597c478bd9Sstevel@tonic-gate /* 14607c478bd9Sstevel@tonic-gate * We are the master watchpoint thread. Set SHOLDWATCH and poke 14617c478bd9Sstevel@tonic-gate * the other threads. 14627c478bd9Sstevel@tonic-gate */ 14637c478bd9Sstevel@tonic-gate p->p_flag |= SHOLDWATCH; 14647c478bd9Sstevel@tonic-gate pokelwps(p); 14657c478bd9Sstevel@tonic-gate 14667c478bd9Sstevel@tonic-gate /* 14677c478bd9Sstevel@tonic-gate * Wait for all threads to be stopped or have TP_WATCHSTOP set. 14687c478bd9Sstevel@tonic-gate */ 14697c478bd9Sstevel@tonic-gate while (pr_allstopped(p, 1) > 0) { 14707c478bd9Sstevel@tonic-gate if (holdcheck(SHOLDWATCH) != 0) { 14717c478bd9Sstevel@tonic-gate p->p_flag &= ~SHOLDWATCH; 14727c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 14737c478bd9Sstevel@tonic-gate return (-1); 14747c478bd9Sstevel@tonic-gate } 14757c478bd9Sstevel@tonic-gate 14767c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 14777c478bd9Sstevel@tonic-gate } 14787c478bd9Sstevel@tonic-gate 14797c478bd9Sstevel@tonic-gate /* 14807c478bd9Sstevel@tonic-gate * All threads are now stopped or in the process of stopping. 14817c478bd9Sstevel@tonic-gate * Set SWATCHOK and let them stop completely. 14827c478bd9Sstevel@tonic-gate */ 14837c478bd9Sstevel@tonic-gate p->p_flag |= SWATCHOK; 14847c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_WATCHSTOP; 14857c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 14867c478bd9Sstevel@tonic-gate 14877c478bd9Sstevel@tonic-gate while (pr_allstopped(p, 0) > 0) { 14887c478bd9Sstevel@tonic-gate /* 14897c478bd9Sstevel@tonic-gate * At first glance, it may appear that we don't need a 14907c478bd9Sstevel@tonic-gate * call to holdcheck() here. But if the process gets a 14917c478bd9Sstevel@tonic-gate * SIGKILL signal, one of our stopped threads may have 14927c478bd9Sstevel@tonic-gate * been awakened and is waiting in exitlwps(), which 14937c478bd9Sstevel@tonic-gate * takes precedence over watchpoints. 
14947c478bd9Sstevel@tonic-gate */ 14957c478bd9Sstevel@tonic-gate if (holdcheck(SHOLDWATCH | SWATCHOK) != 0) { 14967c478bd9Sstevel@tonic-gate p->p_flag &= ~(SHOLDWATCH | SWATCHOK); 14977c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 14987c478bd9Sstevel@tonic-gate return (-1); 14997c478bd9Sstevel@tonic-gate } 15007c478bd9Sstevel@tonic-gate 15017c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 15027c478bd9Sstevel@tonic-gate } 15037c478bd9Sstevel@tonic-gate 15047c478bd9Sstevel@tonic-gate /* 15057c478bd9Sstevel@tonic-gate * All threads are now completely stopped. 15067c478bd9Sstevel@tonic-gate */ 15077c478bd9Sstevel@tonic-gate p->p_flag &= ~SWATCHOK; 15087c478bd9Sstevel@tonic-gate p->p_flag &= ~SHOLDWATCH; 15097c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 15107c478bd9Sstevel@tonic-gate 15117c478bd9Sstevel@tonic-gate } else if (!(p->p_flag & SWATCHOK)) { 15127c478bd9Sstevel@tonic-gate 15137c478bd9Sstevel@tonic-gate /* 15147c478bd9Sstevel@tonic-gate * SHOLDWATCH is set, so another thread is trying to do 15157c478bd9Sstevel@tonic-gate * watchpoint activity. Indicate this thread is stopping, and 15167c478bd9Sstevel@tonic-gate * wait for the OK from the master thread. 15177c478bd9Sstevel@tonic-gate */ 15187c478bd9Sstevel@tonic-gate t->t_proc_flag |= TP_WATCHSTOP; 15197c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 15207c478bd9Sstevel@tonic-gate 15217c478bd9Sstevel@tonic-gate while (!(p->p_flag & SWATCHOK)) { 15227c478bd9Sstevel@tonic-gate if (holdcheck(0) != 0) { 15237c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_WATCHSTOP; 15247c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 15257c478bd9Sstevel@tonic-gate return (-1); 15267c478bd9Sstevel@tonic-gate } 15277c478bd9Sstevel@tonic-gate 15287c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 15297c478bd9Sstevel@tonic-gate } 15307c478bd9Sstevel@tonic-gate 15317c478bd9Sstevel@tonic-gate /* 15327c478bd9Sstevel@tonic-gate * Once the master thread has given the OK, this thread can 15337c478bd9Sstevel@tonic-gate * actually call stop(). 15347c478bd9Sstevel@tonic-gate */ 15357c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_WATCHSTOP; 15367c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 15377c478bd9Sstevel@tonic-gate 15387c478bd9Sstevel@tonic-gate stop(PR_SUSPENDED, SUSPEND_NORMAL); 15397c478bd9Sstevel@tonic-gate 15407c478bd9Sstevel@tonic-gate /* 15417c478bd9Sstevel@tonic-gate * It's not OK to do watchpoint activity, notify caller to 15427c478bd9Sstevel@tonic-gate * retry. 15437c478bd9Sstevel@tonic-gate */ 15447c478bd9Sstevel@tonic-gate ret = -1; 15457c478bd9Sstevel@tonic-gate 15467c478bd9Sstevel@tonic-gate } else { 15477c478bd9Sstevel@tonic-gate 15487c478bd9Sstevel@tonic-gate /* 15497c478bd9Sstevel@tonic-gate * The only way we can hit the case where SHOLDWATCH is set and 15507c478bd9Sstevel@tonic-gate * SWATCHOK is set is if we are triggering this from within a 15517c478bd9Sstevel@tonic-gate * stop() call. Assert that this is the case. 15527c478bd9Sstevel@tonic-gate */ 15537c478bd9Sstevel@tonic-gate 15547c478bd9Sstevel@tonic-gate ASSERT(t->t_proc_flag & TP_STOPPING); 15557c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 15567c478bd9Sstevel@tonic-gate } 15577c478bd9Sstevel@tonic-gate 15587c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 15597c478bd9Sstevel@tonic-gate 15607c478bd9Sstevel@tonic-gate return (ret); 15617c478bd9Sstevel@tonic-gate } 15627c478bd9Sstevel@tonic-gate 15637c478bd9Sstevel@tonic-gate /* 15647c478bd9Sstevel@tonic-gate * force all interruptible lwps to trap into the kernel. 
15657c478bd9Sstevel@tonic-gate */ 15667c478bd9Sstevel@tonic-gate void 15677c478bd9Sstevel@tonic-gate pokelwps(proc_t *p) 15687c478bd9Sstevel@tonic-gate { 15697c478bd9Sstevel@tonic-gate kthread_t *t; 15707c478bd9Sstevel@tonic-gate 15717c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 15727c478bd9Sstevel@tonic-gate 15737c478bd9Sstevel@tonic-gate t = p->p_tlist; 15747c478bd9Sstevel@tonic-gate do { 15757c478bd9Sstevel@tonic-gate if (t == curthread) 15767c478bd9Sstevel@tonic-gate continue; 15777c478bd9Sstevel@tonic-gate thread_lock(t); 15787c478bd9Sstevel@tonic-gate aston(t); /* make thread trap or do post_syscall */ 1579c97ad5cdSakolb if (ISWAKEABLE(t) || ISWAITING(t)) { 15807c478bd9Sstevel@tonic-gate setrun_locked(t); 15817c478bd9Sstevel@tonic-gate } else if (t->t_state == TS_STOPPED) { 15827c478bd9Sstevel@tonic-gate /* 15837c478bd9Sstevel@tonic-gate * Ensure that proc_exit() is not blocked by lwps 15847c478bd9Sstevel@tonic-gate * that were stopped via jobcontrol or /proc. 15857c478bd9Sstevel@tonic-gate */ 15867c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) { 15877c478bd9Sstevel@tonic-gate p->p_stopsig = 0; 15887c478bd9Sstevel@tonic-gate t->t_schedflag |= (TS_XSTART | TS_PSTART); 15897c478bd9Sstevel@tonic-gate setrun_locked(t); 15907c478bd9Sstevel@tonic-gate } 15917c478bd9Sstevel@tonic-gate /* 15927c478bd9Sstevel@tonic-gate * If we are holding lwps for a forkall(), 15937c478bd9Sstevel@tonic-gate * force lwps that have been suspended via 15947c478bd9Sstevel@tonic-gate * lwp_suspend() and are suspended inside 15957c478bd9Sstevel@tonic-gate * of a system call to proceed to their 15967c478bd9Sstevel@tonic-gate * holdlwp() points where they are clonable. 15977c478bd9Sstevel@tonic-gate */ 15987c478bd9Sstevel@tonic-gate if ((p->p_flag & SHOLDFORK) && SUSPENDED(t)) { 15997c478bd9Sstevel@tonic-gate if ((t->t_schedflag & TS_CSTART) == 0) { 16007c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 16017c478bd9Sstevel@tonic-gate t->t_schedflag |= TS_CSTART; 16027c478bd9Sstevel@tonic-gate setrun_locked(t); 16037c478bd9Sstevel@tonic-gate } 16047c478bd9Sstevel@tonic-gate } 16057c478bd9Sstevel@tonic-gate } else if (t->t_state == TS_ONPROC) { 16067c478bd9Sstevel@tonic-gate if (t->t_cpu != CPU) 16077c478bd9Sstevel@tonic-gate poke_cpu(t->t_cpu->cpu_id); 16087c478bd9Sstevel@tonic-gate } 16097c478bd9Sstevel@tonic-gate thread_unlock(t); 16107c478bd9Sstevel@tonic-gate } while ((t = t->t_forw) != p->p_tlist); 16117c478bd9Sstevel@tonic-gate } 16127c478bd9Sstevel@tonic-gate 16137c478bd9Sstevel@tonic-gate /* 16147c478bd9Sstevel@tonic-gate * undo the effects of holdlwps() or holdwatch(). 16157c478bd9Sstevel@tonic-gate */ 16167c478bd9Sstevel@tonic-gate void 16177c478bd9Sstevel@tonic-gate continuelwps(proc_t *p) 16187c478bd9Sstevel@tonic-gate { 16197c478bd9Sstevel@tonic-gate kthread_t *t; 16207c478bd9Sstevel@tonic-gate 16217c478bd9Sstevel@tonic-gate /* 16227c478bd9Sstevel@tonic-gate * If this flag is set, then the original holdwatch() didn't actually 16237c478bd9Sstevel@tonic-gate * stop the process. See comments for holdwatch(). 
16247c478bd9Sstevel@tonic-gate */ 16257c478bd9Sstevel@tonic-gate if (p->p_flag & SWATCHOK) { 16267c478bd9Sstevel@tonic-gate ASSERT(curthread->t_proc_flag & TP_STOPPING); 16277c478bd9Sstevel@tonic-gate return; 16287c478bd9Sstevel@tonic-gate } 16297c478bd9Sstevel@tonic-gate 16307c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&p->p_lock)); 16317c478bd9Sstevel@tonic-gate ASSERT((p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) == 0); 16327c478bd9Sstevel@tonic-gate 16337c478bd9Sstevel@tonic-gate t = p->p_tlist; 16347c478bd9Sstevel@tonic-gate do { 16357c478bd9Sstevel@tonic-gate thread_lock(t); /* SUSPENDED looks at t_schedflag */ 16367c478bd9Sstevel@tonic-gate if (SUSPENDED(t) && !(t->t_proc_flag & TP_HOLDLWP)) { 16377c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 16387c478bd9Sstevel@tonic-gate t->t_schedflag |= TS_CSTART; 16397c478bd9Sstevel@tonic-gate setrun_locked(t); 16407c478bd9Sstevel@tonic-gate } 16417c478bd9Sstevel@tonic-gate thread_unlock(t); 16427c478bd9Sstevel@tonic-gate } while ((t = t->t_forw) != p->p_tlist); 16437c478bd9Sstevel@tonic-gate } 16447c478bd9Sstevel@tonic-gate 16457c478bd9Sstevel@tonic-gate /* 16467c478bd9Sstevel@tonic-gate * Force all other LWPs in the current process other than the caller to exit, 16477c478bd9Sstevel@tonic-gate * and then cv_wait() on p_holdlwps for them to exit. The exitlwps() function 16487c478bd9Sstevel@tonic-gate * is typically used in these situations: 16497c478bd9Sstevel@tonic-gate * 16507c478bd9Sstevel@tonic-gate * (a) prior to an exec() system call 16517c478bd9Sstevel@tonic-gate * (b) prior to dumping a core file 16527c478bd9Sstevel@tonic-gate * (c) prior to a uadmin() shutdown 16537c478bd9Sstevel@tonic-gate * 16547c478bd9Sstevel@tonic-gate * If the 'coredump' flag is set, other LWPs are quiesced but not destroyed. 16557c478bd9Sstevel@tonic-gate * Multiple threads in the process can call this function at one time by 16567c478bd9Sstevel@tonic-gate * triggering execs or core dumps simultaneously, so the SEXITLWPS bit is used 16577c478bd9Sstevel@tonic-gate * to declare one particular thread the winner who gets to kill the others. 16587c478bd9Sstevel@tonic-gate * If a thread wins the exitlwps() dance, zero is returned; otherwise an 16597c478bd9Sstevel@tonic-gate * appropriate errno value is returned to caller for its system call to return. 16607c478bd9Sstevel@tonic-gate */ 16617c478bd9Sstevel@tonic-gate int 16627c478bd9Sstevel@tonic-gate exitlwps(int coredump) 16637c478bd9Sstevel@tonic-gate { 16647c478bd9Sstevel@tonic-gate proc_t *p = curproc; 16657c478bd9Sstevel@tonic-gate int heldcnt; 16667c478bd9Sstevel@tonic-gate 16677c478bd9Sstevel@tonic-gate if (curthread->t_door) 16687c478bd9Sstevel@tonic-gate door_slam(); 16697c478bd9Sstevel@tonic-gate if (p->p_door_list) 16707c478bd9Sstevel@tonic-gate door_revoke_all(); 16717c478bd9Sstevel@tonic-gate if (curthread->t_schedctl != NULL) 16727c478bd9Sstevel@tonic-gate schedctl_lwp_cleanup(curthread); 16737c478bd9Sstevel@tonic-gate 16747c478bd9Sstevel@tonic-gate /* 16757c478bd9Sstevel@tonic-gate * Ensure that before starting to wait for other lwps to exit, 16767c478bd9Sstevel@tonic-gate * cleanup all upimutexes held by curthread. Otherwise, some other 16777c478bd9Sstevel@tonic-gate * lwp could be waiting (uninterruptibly) for a upimutex held by 16787c478bd9Sstevel@tonic-gate * curthread, and the call to pokelwps() below would deadlock. 16797c478bd9Sstevel@tonic-gate * Even if a blocked upimutex_lock is made interruptible, 16807c478bd9Sstevel@tonic-gate * curthread's upimutexes need to be unlocked: do it here. 
16817c478bd9Sstevel@tonic-gate */ 16827c478bd9Sstevel@tonic-gate if (curthread->t_upimutex != NULL) 16837c478bd9Sstevel@tonic-gate upimutex_cleanup(); 16847c478bd9Sstevel@tonic-gate 16857c478bd9Sstevel@tonic-gate /* 16867c478bd9Sstevel@tonic-gate * Grab p_lock in order to check and set SEXITLWPS to declare a winner. 16877c478bd9Sstevel@tonic-gate * We must also block any further /proc access from this point forward. 16887c478bd9Sstevel@tonic-gate */ 16897c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 16907c478bd9Sstevel@tonic-gate prbarrier(p); 16917c478bd9Sstevel@tonic-gate 16927c478bd9Sstevel@tonic-gate if (p->p_flag & SEXITLWPS) { 16937c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 16947c478bd9Sstevel@tonic-gate aston(curthread); /* force a trip through post_syscall */ 16957c478bd9Sstevel@tonic-gate return (set_errno(EINTR)); 16967c478bd9Sstevel@tonic-gate } 16977c478bd9Sstevel@tonic-gate 16987c478bd9Sstevel@tonic-gate p->p_flag |= SEXITLWPS; 16997c478bd9Sstevel@tonic-gate if (coredump) /* tell other lwps to stop, not exit */ 17007c478bd9Sstevel@tonic-gate p->p_flag |= SCOREDUMP; 17017c478bd9Sstevel@tonic-gate 17027c478bd9Sstevel@tonic-gate /* 17037c478bd9Sstevel@tonic-gate * Give precedence to exitlwps() if a holdlwps() is 17047c478bd9Sstevel@tonic-gate * in progress. The lwp doing the holdlwps() operation 17057c478bd9Sstevel@tonic-gate * is aborted when it is awakened. 17067c478bd9Sstevel@tonic-gate */ 17077c478bd9Sstevel@tonic-gate while (p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) { 17087c478bd9Sstevel@tonic-gate cv_broadcast(&p->p_holdlwps); 17097c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 171097eda132Sraf prbarrier(p); 17117c478bd9Sstevel@tonic-gate } 17127c478bd9Sstevel@tonic-gate p->p_flag |= SHOLDFORK; 17137c478bd9Sstevel@tonic-gate pokelwps(p); 17147c478bd9Sstevel@tonic-gate 17157c478bd9Sstevel@tonic-gate /* 17167c478bd9Sstevel@tonic-gate * Wait for process to become quiescent. 17177c478bd9Sstevel@tonic-gate */ 17187c478bd9Sstevel@tonic-gate --p->p_lwprcnt; 171997eda132Sraf while (p->p_lwprcnt > 0) { 17207c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 172197eda132Sraf prbarrier(p); 172297eda132Sraf } 17237c478bd9Sstevel@tonic-gate p->p_lwprcnt++; 17247c478bd9Sstevel@tonic-gate ASSERT(p->p_lwprcnt == 1); 17257c478bd9Sstevel@tonic-gate 17267c478bd9Sstevel@tonic-gate /* 17277c478bd9Sstevel@tonic-gate * The SCOREDUMP flag puts the process into a quiescent 17287c478bd9Sstevel@tonic-gate * state. The process's lwps remain attached to this 17297c478bd9Sstevel@tonic-gate * process until exitlwps() is called again without the 17307c478bd9Sstevel@tonic-gate * 'coredump' flag set, then the lwps are terminated 17317c478bd9Sstevel@tonic-gate * and the process can exit. 17327c478bd9Sstevel@tonic-gate */ 17337c478bd9Sstevel@tonic-gate if (coredump) { 17347c478bd9Sstevel@tonic-gate p->p_flag &= ~(SCOREDUMP | SHOLDFORK | SEXITLWPS); 17357c478bd9Sstevel@tonic-gate goto out; 17367c478bd9Sstevel@tonic-gate } 17377c478bd9Sstevel@tonic-gate 17387c478bd9Sstevel@tonic-gate /* 17397c478bd9Sstevel@tonic-gate * Determine if there are any lwps left dangling in 17407c478bd9Sstevel@tonic-gate * the stopped state. This happens when exitlwps() 17417c478bd9Sstevel@tonic-gate * aborts a holdlwps() operation. 
17427c478bd9Sstevel@tonic-gate */ 17437c478bd9Sstevel@tonic-gate p->p_flag &= ~SHOLDFORK; 17447c478bd9Sstevel@tonic-gate if ((heldcnt = p->p_lwpcnt) > 1) { 17457c478bd9Sstevel@tonic-gate kthread_t *t; 17467c478bd9Sstevel@tonic-gate for (t = curthread->t_forw; --heldcnt > 0; t = t->t_forw) { 17477c478bd9Sstevel@tonic-gate t->t_proc_flag &= ~TP_TWAIT; 17487c478bd9Sstevel@tonic-gate lwp_continue(t); 17497c478bd9Sstevel@tonic-gate } 17507c478bd9Sstevel@tonic-gate } 17517c478bd9Sstevel@tonic-gate 17527c478bd9Sstevel@tonic-gate /* 17537c478bd9Sstevel@tonic-gate * Wait for all other lwps to exit. 17547c478bd9Sstevel@tonic-gate */ 17557c478bd9Sstevel@tonic-gate --p->p_lwprcnt; 175697eda132Sraf while (p->p_lwpcnt > 1) { 17577c478bd9Sstevel@tonic-gate cv_wait(&p->p_holdlwps, &p->p_lock); 175897eda132Sraf prbarrier(p); 175997eda132Sraf } 17607c478bd9Sstevel@tonic-gate ++p->p_lwprcnt; 17617c478bd9Sstevel@tonic-gate ASSERT(p->p_lwpcnt == 1 && p->p_lwprcnt == 1); 17627c478bd9Sstevel@tonic-gate 17637c478bd9Sstevel@tonic-gate p->p_flag &= ~SEXITLWPS; 17647c478bd9Sstevel@tonic-gate curthread->t_proc_flag &= ~TP_TWAIT; 17657c478bd9Sstevel@tonic-gate 17667c478bd9Sstevel@tonic-gate out: 17677c478bd9Sstevel@tonic-gate if (!coredump && p->p_zombcnt) { /* cleanup the zombie lwps */ 17687c478bd9Sstevel@tonic-gate lwpdir_t *ldp; 17697c478bd9Sstevel@tonic-gate lwpent_t *lep; 17707c478bd9Sstevel@tonic-gate int i; 17717c478bd9Sstevel@tonic-gate 17727c478bd9Sstevel@tonic-gate for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) { 17737c478bd9Sstevel@tonic-gate lep = ldp->ld_entry; 17747c478bd9Sstevel@tonic-gate if (lep != NULL && lep->le_thread != curthread) { 17757c478bd9Sstevel@tonic-gate ASSERT(lep->le_thread == NULL); 17767c478bd9Sstevel@tonic-gate p->p_zombcnt--; 17777c478bd9Sstevel@tonic-gate lwp_hash_out(p, lep->le_lwpid); 17787c478bd9Sstevel@tonic-gate } 17797c478bd9Sstevel@tonic-gate } 17807c478bd9Sstevel@tonic-gate ASSERT(p->p_zombcnt == 0); 17817c478bd9Sstevel@tonic-gate } 17827c478bd9Sstevel@tonic-gate 17837c478bd9Sstevel@tonic-gate /* 17847c478bd9Sstevel@tonic-gate * If some other LWP in the process wanted us to suspend ourself, 17857c478bd9Sstevel@tonic-gate * then we will not do it. The other LWP is now terminated and 17867c478bd9Sstevel@tonic-gate * no one will ever continue us again if we suspend ourself. 17877c478bd9Sstevel@tonic-gate */ 17887c478bd9Sstevel@tonic-gate curthread->t_proc_flag &= ~TP_HOLDLWP; 17897c478bd9Sstevel@tonic-gate p->p_flag &= ~(SHOLDFORK | SHOLDFORK1 | SHOLDWATCH | SLWPWRAP); 17907c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 17917c478bd9Sstevel@tonic-gate return (0); 17927c478bd9Sstevel@tonic-gate } 17937c478bd9Sstevel@tonic-gate 17947c478bd9Sstevel@tonic-gate /* 17957c478bd9Sstevel@tonic-gate * duplicate a lwp. 
17967c478bd9Sstevel@tonic-gate */ 17977c478bd9Sstevel@tonic-gate klwp_t * 17987c478bd9Sstevel@tonic-gate forklwp(klwp_t *lwp, proc_t *cp, id_t lwpid) 17997c478bd9Sstevel@tonic-gate { 18007c478bd9Sstevel@tonic-gate klwp_t *clwp; 18017c478bd9Sstevel@tonic-gate void *tregs, *tfpu; 18027c478bd9Sstevel@tonic-gate kthread_t *t = lwptot(lwp); 18037c478bd9Sstevel@tonic-gate kthread_t *ct; 18047c478bd9Sstevel@tonic-gate proc_t *p = lwptoproc(lwp); 18057c478bd9Sstevel@tonic-gate int cid; 18067c478bd9Sstevel@tonic-gate void *bufp; 18079acbbeafSnn35248 void *brand_data; 18087c478bd9Sstevel@tonic-gate int val; 18097c478bd9Sstevel@tonic-gate 18107c478bd9Sstevel@tonic-gate ASSERT(p == curproc); 18117c478bd9Sstevel@tonic-gate ASSERT(t == curthread || (SUSPENDED(t) && lwp->lwp_asleep == 0)); 18127c478bd9Sstevel@tonic-gate 18137c478bd9Sstevel@tonic-gate #if defined(__sparc) 18147c478bd9Sstevel@tonic-gate if (t == curthread) 18157c478bd9Sstevel@tonic-gate (void) flush_user_windows_to_stack(NULL); 18167c478bd9Sstevel@tonic-gate #endif 18177c478bd9Sstevel@tonic-gate 18187c478bd9Sstevel@tonic-gate if (t == curthread) 18197c478bd9Sstevel@tonic-gate /* copy args out of registers first */ 18207c478bd9Sstevel@tonic-gate (void) save_syscall_args(); 18219acbbeafSnn35248 18227c478bd9Sstevel@tonic-gate clwp = lwp_create(cp->p_lwpcnt == 0 ? lwp_rtt_initial : lwp_rtt, 18237c478bd9Sstevel@tonic-gate NULL, 0, cp, TS_STOPPED, t->t_pri, &t->t_hold, NOCLASS, lwpid); 18247c478bd9Sstevel@tonic-gate if (clwp == NULL) 18257c478bd9Sstevel@tonic-gate return (NULL); 18267c478bd9Sstevel@tonic-gate 18277c478bd9Sstevel@tonic-gate /* 18287c478bd9Sstevel@tonic-gate * most of the parent's lwp can be copied to its duplicate, 18297c478bd9Sstevel@tonic-gate * except for the fields that are unique to each lwp, like 18307c478bd9Sstevel@tonic-gate * lwp_thread, lwp_procp, lwp_regs, and lwp_ap. 18317c478bd9Sstevel@tonic-gate */ 18327c478bd9Sstevel@tonic-gate ct = clwp->lwp_thread; 18337c478bd9Sstevel@tonic-gate tregs = clwp->lwp_regs; 18347c478bd9Sstevel@tonic-gate tfpu = clwp->lwp_fpu; 18359acbbeafSnn35248 brand_data = clwp->lwp_brand; 18367c478bd9Sstevel@tonic-gate 18375d3ff519Sjohansen /* 18385d3ff519Sjohansen * Copy parent lwp to child lwp. Hold child's p_lock to prevent 18395d3ff519Sjohansen * mstate_aggr_state() from reading stale mstate entries copied 18405d3ff519Sjohansen * from lwp to clwp. 
18415d3ff519Sjohansen */ 18425d3ff519Sjohansen mutex_enter(&cp->p_lock); 18437c478bd9Sstevel@tonic-gate *clwp = *lwp; 18447c478bd9Sstevel@tonic-gate 18455d3ff519Sjohansen /* clear microstate and resource usage data in new lwp */ 18465d3ff519Sjohansen init_mstate(ct, LMS_STOPPED); 18475d3ff519Sjohansen bzero(&clwp->lwp_ru, sizeof (clwp->lwp_ru)); 18485d3ff519Sjohansen mutex_exit(&cp->p_lock); 18495d3ff519Sjohansen 18507c478bd9Sstevel@tonic-gate /* fix up child's lwp */ 18517c478bd9Sstevel@tonic-gate 18527712e92cSsudheer clwp->lwp_pcb.pcb_flags = 0; 18537712e92cSsudheer #if defined(__sparc) 18547c478bd9Sstevel@tonic-gate clwp->lwp_pcb.pcb_step = STEP_NONE; 18557c478bd9Sstevel@tonic-gate #endif 18567c478bd9Sstevel@tonic-gate clwp->lwp_cursig = 0; 18577c478bd9Sstevel@tonic-gate clwp->lwp_extsig = 0; 18587c478bd9Sstevel@tonic-gate clwp->lwp_curinfo = (struct sigqueue *)0; 18597c478bd9Sstevel@tonic-gate clwp->lwp_thread = ct; 18607c478bd9Sstevel@tonic-gate ct->t_sysnum = t->t_sysnum; 18617c478bd9Sstevel@tonic-gate clwp->lwp_regs = tregs; 18627c478bd9Sstevel@tonic-gate clwp->lwp_fpu = tfpu; 18639acbbeafSnn35248 clwp->lwp_brand = brand_data; 18647c478bd9Sstevel@tonic-gate clwp->lwp_ap = clwp->lwp_arg; 18657c478bd9Sstevel@tonic-gate clwp->lwp_procp = cp; 18667c478bd9Sstevel@tonic-gate bzero(clwp->lwp_timer, sizeof (clwp->lwp_timer)); 18677c478bd9Sstevel@tonic-gate clwp->lwp_lastfault = 0; 18687c478bd9Sstevel@tonic-gate clwp->lwp_lastfaddr = 0; 18697c478bd9Sstevel@tonic-gate 18707c478bd9Sstevel@tonic-gate /* copy parent's struct regs to child. */ 18717c478bd9Sstevel@tonic-gate lwp_forkregs(lwp, clwp); 18727c478bd9Sstevel@tonic-gate 18737c478bd9Sstevel@tonic-gate /* 18740baeff3dSrab * Fork thread context ops, if any. 18757c478bd9Sstevel@tonic-gate */ 18767c478bd9Sstevel@tonic-gate if (t->t_ctx) 18777c478bd9Sstevel@tonic-gate forkctx(t, ct); 18787c478bd9Sstevel@tonic-gate 18797c478bd9Sstevel@tonic-gate /* fix door state in the child */ 18807c478bd9Sstevel@tonic-gate if (t->t_door) 18817c478bd9Sstevel@tonic-gate door_fork(t, ct); 18827c478bd9Sstevel@tonic-gate 18837c478bd9Sstevel@tonic-gate /* copy current contract templates, clear latest contracts */ 18847c478bd9Sstevel@tonic-gate lwp_ctmpl_copy(clwp, lwp); 18857c478bd9Sstevel@tonic-gate 18867c478bd9Sstevel@tonic-gate mutex_enter(&cp->p_lock); 18877c478bd9Sstevel@tonic-gate /* lwp_create() set the TP_HOLDLWP flag */ 18887c478bd9Sstevel@tonic-gate if (!(t->t_proc_flag & TP_HOLDLWP)) 18897c478bd9Sstevel@tonic-gate ct->t_proc_flag &= ~TP_HOLDLWP; 18907c478bd9Sstevel@tonic-gate if (cp->p_flag & SMSACCT) 18917c478bd9Sstevel@tonic-gate ct->t_proc_flag |= TP_MSACCT; 18927c478bd9Sstevel@tonic-gate mutex_exit(&cp->p_lock); 18937c478bd9Sstevel@tonic-gate 18949acbbeafSnn35248 /* Allow brand to propagate brand-specific state */ 18959acbbeafSnn35248 if (PROC_IS_BRANDED(p)) 18969acbbeafSnn35248 BROP(p)->b_forklwp(lwp, clwp); 18979acbbeafSnn35248 18987c478bd9Sstevel@tonic-gate retry: 18997c478bd9Sstevel@tonic-gate cid = t->t_cid; 19007c478bd9Sstevel@tonic-gate 19017c478bd9Sstevel@tonic-gate val = CL_ALLOC(&bufp, cid, KM_SLEEP); 19027c478bd9Sstevel@tonic-gate ASSERT(val == 0); 19037c478bd9Sstevel@tonic-gate 19047c478bd9Sstevel@tonic-gate mutex_enter(&p->p_lock); 19057c478bd9Sstevel@tonic-gate if (cid != t->t_cid) { 19067c478bd9Sstevel@tonic-gate /* 19077c478bd9Sstevel@tonic-gate * Someone just changed this thread's scheduling class, 19087c478bd9Sstevel@tonic-gate * so try pre-allocating the buffer again. 
Hopefully we 19097c478bd9Sstevel@tonic-gate * don't hit this often. 19107c478bd9Sstevel@tonic-gate */ 19117c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 19127c478bd9Sstevel@tonic-gate CL_FREE(cid, bufp); 19137c478bd9Sstevel@tonic-gate goto retry; 19147c478bd9Sstevel@tonic-gate } 19157c478bd9Sstevel@tonic-gate 19167c478bd9Sstevel@tonic-gate ct->t_unpark = t->t_unpark; 19177c478bd9Sstevel@tonic-gate ct->t_clfuncs = t->t_clfuncs; 19187c478bd9Sstevel@tonic-gate CL_FORK(t, ct, bufp); 19197c478bd9Sstevel@tonic-gate ct->t_cid = t->t_cid; /* after data allocated so prgetpsinfo works */ 19207c478bd9Sstevel@tonic-gate mutex_exit(&p->p_lock); 19217c478bd9Sstevel@tonic-gate 19227c478bd9Sstevel@tonic-gate return (clwp); 19237c478bd9Sstevel@tonic-gate } 19247c478bd9Sstevel@tonic-gate 19257c478bd9Sstevel@tonic-gate /* 19267c478bd9Sstevel@tonic-gate * Add a new lwp entry to the lwp directory and to the lwpid hash table. 19277c478bd9Sstevel@tonic-gate */ 19287c478bd9Sstevel@tonic-gate void 19296eb30ec3SRoger A. Faulkner lwp_hash_in(proc_t *p, lwpent_t *lep, tidhash_t *tidhash, uint_t tidhash_sz, 19306eb30ec3SRoger A. Faulkner int do_lock) 19317c478bd9Sstevel@tonic-gate { 19326eb30ec3SRoger A. Faulkner tidhash_t *thp = &tidhash[TIDHASH(lep->le_lwpid, tidhash_sz)]; 19337c478bd9Sstevel@tonic-gate lwpdir_t **ldpp; 19347c478bd9Sstevel@tonic-gate lwpdir_t *ldp; 19357c478bd9Sstevel@tonic-gate kthread_t *t; 19367c478bd9Sstevel@tonic-gate 19377c478bd9Sstevel@tonic-gate /* 19387c478bd9Sstevel@tonic-gate * Allocate a directory element from the free list. 19397c478bd9Sstevel@tonic-gate * Code elsewhere guarantees a free slot. 19407c478bd9Sstevel@tonic-gate */ 19417c478bd9Sstevel@tonic-gate ldp = p->p_lwpfree; 19427c478bd9Sstevel@tonic-gate p->p_lwpfree = ldp->ld_next; 19437c478bd9Sstevel@tonic-gate ASSERT(ldp->ld_entry == NULL); 19447c478bd9Sstevel@tonic-gate ldp->ld_entry = lep; 19457c478bd9Sstevel@tonic-gate 19466eb30ec3SRoger A. Faulkner if (do_lock) 19476eb30ec3SRoger A. Faulkner mutex_enter(&thp->th_lock); 19486eb30ec3SRoger A. Faulkner 19497c478bd9Sstevel@tonic-gate /* 19507c478bd9Sstevel@tonic-gate * Insert it into the lwpid hash table. 19517c478bd9Sstevel@tonic-gate */ 19526eb30ec3SRoger A. Faulkner ldpp = &thp->th_list; 19537c478bd9Sstevel@tonic-gate ldp->ld_next = *ldpp; 19547c478bd9Sstevel@tonic-gate *ldpp = ldp; 19557c478bd9Sstevel@tonic-gate 19567c478bd9Sstevel@tonic-gate /* 19577c478bd9Sstevel@tonic-gate * Set the active thread's directory slot entry. 19587c478bd9Sstevel@tonic-gate */ 19597c478bd9Sstevel@tonic-gate if ((t = lep->le_thread) != NULL) { 19607c478bd9Sstevel@tonic-gate ASSERT(lep->le_lwpid == t->t_tid); 19617c478bd9Sstevel@tonic-gate t->t_dslot = (int)(ldp - p->p_lwpdir); 19627c478bd9Sstevel@tonic-gate } 19636eb30ec3SRoger A. Faulkner 19646eb30ec3SRoger A. Faulkner if (do_lock) 19656eb30ec3SRoger A. Faulkner mutex_exit(&thp->th_lock); 19667c478bd9Sstevel@tonic-gate } 19677c478bd9Sstevel@tonic-gate 19687c478bd9Sstevel@tonic-gate /* 19697c478bd9Sstevel@tonic-gate * Remove an lwp from the lwpid hash table and free its directory entry. 19707c478bd9Sstevel@tonic-gate * This is done when a detached lwp exits in lwp_exit() or 19717c478bd9Sstevel@tonic-gate * when a non-detached lwp is waited for in lwp_wait() or 19727c478bd9Sstevel@tonic-gate * when a zombie lwp is detached in lwp_detach(). 19737c478bd9Sstevel@tonic-gate */ 19747c478bd9Sstevel@tonic-gate void 19757c478bd9Sstevel@tonic-gate lwp_hash_out(proc_t *p, id_t lwpid) 19767c478bd9Sstevel@tonic-gate { 19776eb30ec3SRoger A. 
Faulkner tidhash_t *thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)]; 19787c478bd9Sstevel@tonic-gate lwpdir_t **ldpp; 19797c478bd9Sstevel@tonic-gate lwpdir_t *ldp; 19807c478bd9Sstevel@tonic-gate lwpent_t *lep; 19817c478bd9Sstevel@tonic-gate 19826eb30ec3SRoger A. Faulkner mutex_enter(&thp->th_lock); 19836eb30ec3SRoger A. Faulkner for (ldpp = &thp->th_list; 19847c478bd9Sstevel@tonic-gate (ldp = *ldpp) != NULL; ldpp = &ldp->ld_next) { 19857c478bd9Sstevel@tonic-gate lep = ldp->ld_entry; 19867c478bd9Sstevel@tonic-gate if (lep->le_lwpid == lwpid) { 19877c478bd9Sstevel@tonic-gate prlwpfree(p, lep); /* /proc deals with le_trace */ 19887c478bd9Sstevel@tonic-gate *ldpp = ldp->ld_next; 19897c478bd9Sstevel@tonic-gate ldp->ld_entry = NULL; 19907c478bd9Sstevel@tonic-gate ldp->ld_next = p->p_lwpfree; 19917c478bd9Sstevel@tonic-gate p->p_lwpfree = ldp; 19927c478bd9Sstevel@tonic-gate kmem_free(lep, sizeof (*lep)); 19937c478bd9Sstevel@tonic-gate break; 19947c478bd9Sstevel@tonic-gate } 19957c478bd9Sstevel@tonic-gate } 19966eb30ec3SRoger A. Faulkner mutex_exit(&thp->th_lock); 19977c478bd9Sstevel@tonic-gate } 19987c478bd9Sstevel@tonic-gate 19997c478bd9Sstevel@tonic-gate /* 20007c478bd9Sstevel@tonic-gate * Lookup an lwp in the lwpid hash table by lwpid. 20017c478bd9Sstevel@tonic-gate */ 20027c478bd9Sstevel@tonic-gate lwpdir_t * 20037c478bd9Sstevel@tonic-gate lwp_hash_lookup(proc_t *p, id_t lwpid) 20047c478bd9Sstevel@tonic-gate { 20056eb30ec3SRoger A. Faulkner tidhash_t *thp; 20067c478bd9Sstevel@tonic-gate lwpdir_t *ldp; 20077c478bd9Sstevel@tonic-gate 20087c478bd9Sstevel@tonic-gate /* 20097c478bd9Sstevel@tonic-gate * The process may be exiting, after p_tidhash has been set to NULL in 20107c478bd9Sstevel@tonic-gate * proc_exit() but before prfree() has been called. Return failure in 20117c478bd9Sstevel@tonic-gate * this case. 20127c478bd9Sstevel@tonic-gate */ 20137c478bd9Sstevel@tonic-gate if (p->p_tidhash == NULL) 20147c478bd9Sstevel@tonic-gate return (NULL); 20157c478bd9Sstevel@tonic-gate 20166eb30ec3SRoger A. Faulkner thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)]; 20176eb30ec3SRoger A. Faulkner for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) { 20187c478bd9Sstevel@tonic-gate if (ldp->ld_entry->le_lwpid == lwpid) 20197c478bd9Sstevel@tonic-gate return (ldp); 20207c478bd9Sstevel@tonic-gate } 20217c478bd9Sstevel@tonic-gate 20227c478bd9Sstevel@tonic-gate return (NULL); 20237c478bd9Sstevel@tonic-gate } 20247c478bd9Sstevel@tonic-gate 20257c478bd9Sstevel@tonic-gate /* 20266eb30ec3SRoger A. Faulkner * Same as lwp_hash_lookup(), but acquire and return 20276eb30ec3SRoger A. Faulkner * the tid hash table entry lock on success. 20286eb30ec3SRoger A. Faulkner */ 20296eb30ec3SRoger A. Faulkner lwpdir_t * 20306eb30ec3SRoger A. Faulkner lwp_hash_lookup_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp) 20316eb30ec3SRoger A. Faulkner { 20326eb30ec3SRoger A. Faulkner tidhash_t *tidhash; 20336eb30ec3SRoger A. Faulkner uint_t tidhash_sz; 20346eb30ec3SRoger A. Faulkner tidhash_t *thp; 20356eb30ec3SRoger A. Faulkner lwpdir_t *ldp; 20366eb30ec3SRoger A. Faulkner 20376eb30ec3SRoger A. Faulkner top: 20386eb30ec3SRoger A. Faulkner tidhash_sz = p->p_tidhash_sz; 20396eb30ec3SRoger A. Faulkner membar_consumer(); 20406eb30ec3SRoger A. Faulkner if ((tidhash = p->p_tidhash) == NULL) 20416eb30ec3SRoger A. Faulkner return (NULL); 20426eb30ec3SRoger A. Faulkner 20436eb30ec3SRoger A. Faulkner thp = &tidhash[TIDHASH(lwpid, tidhash_sz)]; 20446eb30ec3SRoger A. Faulkner mutex_enter(&thp->th_lock); 20456eb30ec3SRoger A.
Faulkner 20466eb30ec3SRoger A. Faulkner /* 20476eb30ec3SRoger A. Faulkner * Since we are not holding p->p_lock, the tid hash table 20486eb30ec3SRoger A. Faulkner * may have changed. If so, start over. If not, then 20496eb30ec3SRoger A. Faulkner * it cannot change until after we drop &thp->th_lock; 20506eb30ec3SRoger A. Faulkner */ 20516eb30ec3SRoger A. Faulkner if (tidhash != p->p_tidhash || tidhash_sz != p->p_tidhash_sz) { 20526eb30ec3SRoger A. Faulkner mutex_exit(&thp->th_lock); 20536eb30ec3SRoger A. Faulkner goto top; 20546eb30ec3SRoger A. Faulkner } 20556eb30ec3SRoger A. Faulkner 20566eb30ec3SRoger A. Faulkner for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) { 20576eb30ec3SRoger A. Faulkner if (ldp->ld_entry->le_lwpid == lwpid) { 20586eb30ec3SRoger A. Faulkner *mpp = &thp->th_lock; 20596eb30ec3SRoger A. Faulkner return (ldp); 20606eb30ec3SRoger A. Faulkner } 20616eb30ec3SRoger A. Faulkner } 20626eb30ec3SRoger A. Faulkner 20636eb30ec3SRoger A. Faulkner mutex_exit(&thp->th_lock); 20646eb30ec3SRoger A. Faulkner return (NULL); 20656eb30ec3SRoger A. Faulkner } 20666eb30ec3SRoger A. Faulkner 20676eb30ec3SRoger A. Faulkner /* 20687c478bd9Sstevel@tonic-gate * Update the indicated LWP usage statistic for the current LWP. 20697c478bd9Sstevel@tonic-gate */ 20707c478bd9Sstevel@tonic-gate void 20717c478bd9Sstevel@tonic-gate lwp_stat_update(lwp_stat_id_t lwp_stat_id, long inc) 20727c478bd9Sstevel@tonic-gate { 20737c478bd9Sstevel@tonic-gate klwp_t *lwp = ttolwp(curthread); 20747c478bd9Sstevel@tonic-gate 20757c478bd9Sstevel@tonic-gate if (lwp == NULL) 20767c478bd9Sstevel@tonic-gate return; 20777c478bd9Sstevel@tonic-gate 20787c478bd9Sstevel@tonic-gate switch (lwp_stat_id) { 20797c478bd9Sstevel@tonic-gate case LWP_STAT_INBLK: 20807c478bd9Sstevel@tonic-gate lwp->lwp_ru.inblock += inc; 20817c478bd9Sstevel@tonic-gate break; 20827c478bd9Sstevel@tonic-gate case LWP_STAT_OUBLK: 20837c478bd9Sstevel@tonic-gate lwp->lwp_ru.oublock += inc; 20847c478bd9Sstevel@tonic-gate break; 20857c478bd9Sstevel@tonic-gate case LWP_STAT_MSGRCV: 20867c478bd9Sstevel@tonic-gate lwp->lwp_ru.msgrcv += inc; 20877c478bd9Sstevel@tonic-gate break; 20887c478bd9Sstevel@tonic-gate case LWP_STAT_MSGSND: 20897c478bd9Sstevel@tonic-gate lwp->lwp_ru.msgsnd += inc; 20907c478bd9Sstevel@tonic-gate break; 20917c478bd9Sstevel@tonic-gate default: 20927c478bd9Sstevel@tonic-gate panic("lwp_stat_update: invalid lwp_stat_id 0x%x", lwp_stat_id); 20937c478bd9Sstevel@tonic-gate } 20947c478bd9Sstevel@tonic-gate } 2095