xref: /illumos-gate/usr/src/uts/common/os/lwp.c (revision 63aa537723d4883425b44d96b6316b7ad14053fc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/param.h>
28 #include <sys/types.h>
29 #include <sys/sysmacros.h>
30 #include <sys/systm.h>
31 #include <sys/thread.h>
32 #include <sys/proc.h>
33 #include <sys/task.h>
34 #include <sys/project.h>
35 #include <sys/signal.h>
36 #include <sys/errno.h>
37 #include <sys/vmparam.h>
38 #include <sys/stack.h>
39 #include <sys/procfs.h>
40 #include <sys/prsystm.h>
41 #include <sys/cpuvar.h>
42 #include <sys/kmem.h>
43 #include <sys/vtrace.h>
44 #include <sys/door.h>
45 #include <vm/seg_kp.h>
46 #include <sys/debug.h>
47 #include <sys/tnf.h>
48 #include <sys/schedctl.h>
49 #include <sys/poll.h>
50 #include <sys/copyops.h>
51 #include <sys/lwp_upimutex_impl.h>
52 #include <sys/cpupart.h>
53 #include <sys/lgrp.h>
54 #include <sys/rctl.h>
55 #include <sys/contract_impl.h>
56 #include <sys/cpc_impl.h>
57 #include <sys/sdt.h>
58 #include <sys/cmn_err.h>
59 #include <sys/brand.h>
60 #include <sys/cyclic.h>
61 #include <sys/pool.h>
62 
63 /* hash function for the lwpid hash table, p->p_tidhash[] */
64 #define	TIDHASH(tid, hash_sz)	((tid) & ((hash_sz) - 1))
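/*
 * Illustrative arithmetic (an editorial example, not original source):
 * hash_sz is always a power of two, so the mask (hash_sz - 1) keeps the
 * low-order bits of the tid.  With hash_sz == 8, for example:
 *	TIDHASH(1, 8)  == 1
 *	TIDHASH(9, 8)  == 1	(9 & 7)
 *	TIDHASH(12, 8) == 4	(12 & 7)
 */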
65 
66 void *segkp_lwp;		/* cookie for pool of segkp resources */
67 extern void reapq_move_lq_to_tq(kthread_t *);
68 extern void freectx_ctx(struct ctxop *);
69 
70 /*
71  * Create a kernel thread associated with a particular system process.  Give
72  * it an LWP so that microstate accounting will be available for it.
73  */
74 kthread_t *
75 lwp_kernel_create(proc_t *p, void (*proc)(), void *arg, int state, pri_t pri)
76 {
77 	klwp_t *lwp;
78 
79 	VERIFY((p->p_flag & SSYS) != 0);
80 
81 	lwp = lwp_create(proc, arg, 0, p, state, pri, &t0.t_hold, syscid, 0);
82 
83 	VERIFY(lwp != NULL);
84 
85 	return (lwptot(lwp));
86 }
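
/*
 * Illustrative sketch (hypothetical names, not part of the original
 * source): a system process could start an LWP-backed worker thread with
 * lwp_kernel_create() roughly as follows.  SSYS must already be set on
 * the process, or the VERIFY above will fire.
 *
 *	static void
 *	my_worker(void *arg)
 *	{
 *		... do the work, then lwp_exit() ...
 *	}
 *
 *	kthread_t *t = lwp_kernel_create(sysproc, my_worker, arg,
 *	    TS_RUN, minclsyspri);
 */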
87 
88 /*
89  * Create a thread that appears to be stopped at sys_rtt.
90  */
91 klwp_t *
92 lwp_create(void (*proc)(), caddr_t arg, size_t len, proc_t *p,
93     int state, int pri, const k_sigset_t *smask, int cid, id_t lwpid)
94 {
95 	klwp_t *lwp = NULL;
96 	kthread_t *t;
97 	kthread_t *tx;
98 	cpupart_t *oldpart = NULL;
99 	size_t	stksize;
100 	caddr_t lwpdata = NULL;
101 	processorid_t	binding;
102 	int err = 0;
103 	kproject_t *oldkpj, *newkpj;
104 	void *bufp = NULL;
105 	klwp_t *curlwp;
106 	lwpent_t *lep;
107 	lwpdir_t *old_dir = NULL;
108 	uint_t old_dirsz = 0;
109 	tidhash_t *old_hash = NULL;
110 	uint_t old_hashsz = 0;
111 	ret_tidhash_t *ret_tidhash = NULL;
112 	int i;
113 	int rctlfail = 0;
114 	boolean_t branded = 0;
115 	struct ctxop *ctx = NULL;
116 
117 	ASSERT(cid != sysdccid);	/* system threads must start in SYS */
118 
119 	ASSERT(p != &p0);		/* No new LWPs in p0. */
120 
121 	mutex_enter(&p->p_lock);
122 	mutex_enter(&p->p_zone->zone_nlwps_lock);
123 	/*
124 	 * don't enforce rctl limits on system processes
125 	 */
126 	if (!CLASS_KERNEL(cid)) {
127 		if (p->p_task->tk_nlwps >= p->p_task->tk_nlwps_ctl)
128 			if (rctl_test(rc_task_lwps, p->p_task->tk_rctls, p,
129 			    1, 0) & RCT_DENY)
130 				rctlfail = 1;
131 		if (p->p_task->tk_proj->kpj_nlwps >=
132 		    p->p_task->tk_proj->kpj_nlwps_ctl)
133 			if (rctl_test(rc_project_nlwps,
134 			    p->p_task->tk_proj->kpj_rctls, p, 1, 0)
135 			    & RCT_DENY)
136 				rctlfail = 1;
137 		if (p->p_zone->zone_nlwps >= p->p_zone->zone_nlwps_ctl)
138 			if (rctl_test(rc_zone_nlwps, p->p_zone->zone_rctls, p,
139 			    1, 0) & RCT_DENY)
140 				rctlfail = 1;
141 	}
142 	if (rctlfail) {
143 		mutex_exit(&p->p_zone->zone_nlwps_lock);
144 		mutex_exit(&p->p_lock);
145 		return (NULL);
146 	}
147 	p->p_task->tk_nlwps++;
148 	p->p_task->tk_proj->kpj_nlwps++;
149 	p->p_zone->zone_nlwps++;
150 	mutex_exit(&p->p_zone->zone_nlwps_lock);
151 	mutex_exit(&p->p_lock);
152 
153 	if (CLASS_KERNEL(cid)) {
154 		curlwp = NULL;		/* don't inherit from curlwp */
155 		stksize = lwp_default_stksize;
156 	} else {
157 		curlwp = ttolwp(curthread);
158 		if (curlwp == NULL || (stksize = curlwp->lwp_childstksz) == 0)
159 			stksize = lwp_default_stksize;
160 	}
161 
162 	/*
163 	 * For system threads, we sleep for our swap reservation, and the
164 	 * thread stack can't be swapped.
165 	 *
166 	 * Otherwise, try to reclaim a <lwp,stack> from 'deathrow'.
167 	 */
168 	if (CLASS_KERNEL(cid)) {
169 		lwpdata = (caddr_t)segkp_get(segkp, stksize,
170 		    (KPD_NO_ANON | KPD_HASREDZONE | KPD_LOCKED));
171 
172 	} else if (stksize == lwp_default_stksize) {
173 		if (lwp_reapcnt > 0) {
174 			mutex_enter(&reaplock);
175 			if ((t = lwp_deathrow) != NULL) {
176 				ASSERT(t->t_swap);
177 				lwp_deathrow = t->t_forw;
178 				lwp_reapcnt--;
179 				lwpdata = t->t_swap;
180 				lwp = t->t_lwp;
181 				ctx = t->t_ctx;
182 				t->t_swap = NULL;
183 				t->t_lwp = NULL;
184 				t->t_ctx = NULL;
185 				reapq_move_lq_to_tq(t);
186 			}
187 			mutex_exit(&reaplock);
188 			if (lwp != NULL) {
189 				lwp_stk_fini(lwp);
190 			}
191 			if (ctx != NULL) {
192 				freectx_ctx(ctx);
193 			}
194 		}
195 		if (lwpdata == NULL &&
196 		    (lwpdata = (caddr_t)segkp_cache_get(segkp_lwp)) == NULL) {
197 			mutex_enter(&p->p_lock);
198 			mutex_enter(&p->p_zone->zone_nlwps_lock);
199 			p->p_task->tk_nlwps--;
200 			p->p_task->tk_proj->kpj_nlwps--;
201 			p->p_zone->zone_nlwps--;
202 			mutex_exit(&p->p_zone->zone_nlwps_lock);
203 			mutex_exit(&p->p_lock);
204 			return (NULL);
205 		}
206 	} else {
207 		stksize = roundup(stksize, PAGESIZE);
208 		if ((lwpdata = (caddr_t)segkp_get(segkp, stksize,
209 		    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED))) == NULL) {
210 			mutex_enter(&p->p_lock);
211 			mutex_enter(&p->p_zone->zone_nlwps_lock);
212 			p->p_task->tk_nlwps--;
213 			p->p_task->tk_proj->kpj_nlwps--;
214 			p->p_zone->zone_nlwps--;
215 			mutex_exit(&p->p_zone->zone_nlwps_lock);
216 			mutex_exit(&p->p_lock);
217 			return (NULL);
218 		}
219 	}
220 
221 	/*
222 	 * Create a thread, initializing the stack pointer
223 	 */
224 	t = thread_create(lwpdata, stksize, NULL, NULL, 0, p, TS_STOPPED, pri);
225 
226 	t->t_swap = lwpdata;	/* Start of page-able data */
227 	if (lwp == NULL)
228 		lwp = kmem_cache_alloc(lwp_cache, KM_SLEEP);
229 	bzero(lwp, sizeof (*lwp));
230 	t->t_lwp = lwp;
231 
232 	t->t_hold = *smask;
233 	lwp->lwp_thread = t;
234 	lwp->lwp_procp = p;
235 	lwp->lwp_sigaltstack.ss_flags = SS_DISABLE;
236 	if (curlwp != NULL && curlwp->lwp_childstksz != 0)
237 		lwp->lwp_childstksz = curlwp->lwp_childstksz;
238 
239 	t->t_stk = lwp_stk_init(lwp, t->t_stk);
240 	thread_load(t, proc, arg, len);
241 
242 	/*
243 	 * Allocate the SIGPROF buffer if ITIMER_REALPROF is in effect.
244 	 */
245 	if (p->p_rprof_cyclic != CYCLIC_NONE)
246 		t->t_rprof = kmem_zalloc(sizeof (struct rprof), KM_SLEEP);
247 
248 	if (cid != NOCLASS)
249 		(void) CL_ALLOC(&bufp, cid, KM_SLEEP);
250 
251 	/*
252 	 * Allocate an lwp directory entry for the new lwp.
253 	 */
254 	lep = kmem_zalloc(sizeof (*lep), KM_SLEEP);
255 
256 	mutex_enter(&p->p_lock);
257 grow:
258 	/*
259 	 * Grow the lwp (thread) directory and lwpid hash table if necessary.
260 	 * A note on the growth algorithm:
261 	 *	The new lwp directory size is computed as:
262 	 *		new = 2 * old + 2
263 	 *	Starting with an initial size of 2 (see exec_common()),
264 	 *	this yields numbers that are a power of two minus 2:
265 	 *		2, 6, 14, 30, 62, 126, 254, 510, 1022, ...
266 	 *	The size of the lwpid hash table must be a power of two
267 	 *	and must be commensurate in size with the lwp directory
268 	 *	so that hash bucket chains remain short.  Therefore,
269 	 *	the lwpid hash table size is computed as:
270 	 *		hashsz = (dirsz + 2) / 2
271 	 *	which leads to these hash table sizes corresponding to
272 	 *	the above directory sizes:
273 	 *		2, 4, 8, 16, 32, 64, 128, 256, 512, ...
274 	 * A note on growing the hash table:
275 	 *	For performance reasons, code in lwp_unpark() does not
276 	 *	acquire curproc->p_lock when searching the hash table.
277 	 *	Rather, it calls lwp_hash_lookup_and_lock() which
278 	 *	acquires only the individual hash bucket lock, taking
279 	 *	care to deal with reallocation of the hash table
280 	 *	during the time it takes to acquire the lock.
281 	 *
282 	 *	This is sufficient to protect the integrity of the
283 	 *	hash table, but it requires us to acquire all of the
284 	 *	old hash bucket locks before growing the hash table
285 	 *	and to release them afterwards.  It also requires us
286 	 *	not to free the old hash table because some thread
287 	 *	in lwp_hash_lookup_and_lock() might still be trying
288 	 *	to acquire the old bucket lock.
289 	 *
290 	 *	So we adopt the tactic of keeping all of the retired
291 	 *	hash tables on a linked list, so they can be safely
292 	 *	freed when the process exits or execs.
293 	 *
294 	 *	Because the hash table grows in powers of two, the
295 	 *	total size of all of the hash tables will be slightly
296 	 *	less than twice the size of the largest hash table.
297 	 */
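	/*
	 * Worked example (editorial illustration): growing from
	 * dirsz == 6 gives new_dirsz == 2 * 6 + 2 == 14 and
	 * new_hashsz == (14 + 2) / 2 == 8, matching the sequences above.
	 */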
298 	while (p->p_lwpfree == NULL) {
299 		uint_t dirsz = p->p_lwpdir_sz;
300 		lwpdir_t *new_dir;
301 		uint_t new_dirsz;
302 		lwpdir_t *ldp;
303 		tidhash_t *new_hash;
304 		uint_t new_hashsz;
305 
306 		mutex_exit(&p->p_lock);
307 
308 		/*
309 		 * Prepare to remember the old p_tidhash for later
310 		 * kmem_free()ing when the process exits or execs.
311 		 */
312 		if (ret_tidhash == NULL)
313 			ret_tidhash = kmem_zalloc(sizeof (ret_tidhash_t),
314 			    KM_SLEEP);
315 		if (old_dir != NULL)
316 			kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
317 		if (old_hash != NULL)
318 			kmem_free(old_hash, old_hashsz * sizeof (*old_hash));
319 
320 		new_dirsz = 2 * dirsz + 2;
321 		new_dir = kmem_zalloc(new_dirsz * sizeof (lwpdir_t), KM_SLEEP);
322 		for (ldp = new_dir, i = 1; i < new_dirsz; i++, ldp++)
323 			ldp->ld_next = ldp + 1;
324 		new_hashsz = (new_dirsz + 2) / 2;
325 		new_hash = kmem_zalloc(new_hashsz * sizeof (tidhash_t),
326 		    KM_SLEEP);
327 
328 		mutex_enter(&p->p_lock);
329 		if (p == curproc)
330 			prbarrier(p);
331 
332 		if (dirsz != p->p_lwpdir_sz || p->p_lwpfree != NULL) {
333 			/*
334 			 * Someone else beat us to it or some lwp exited.
335 			 * Set up to free our memory and take a lap.
336 			 */
337 			old_dir = new_dir;
338 			old_dirsz = new_dirsz;
339 			old_hash = new_hash;
340 			old_hashsz = new_hashsz;
341 		} else {
342 			/*
343 			 * For the benefit of lwp_hash_lookup_and_lock(),
344 			 * called from lwp_unpark(), which searches the
345 			 * tid hash table without acquiring p->p_lock,
346 			 * we must acquire all of the tid hash table
347 			 * locks before replacing p->p_tidhash.
348 			 */
349 			old_hash = p->p_tidhash;
350 			old_hashsz = p->p_tidhash_sz;
351 			for (i = 0; i < old_hashsz; i++) {
352 				mutex_enter(&old_hash[i].th_lock);
353 				mutex_enter(&new_hash[i].th_lock);
354 			}
355 
356 			/*
357 			 * We simply hash in all of the old directory entries.
358 			 * This works because the old directory has no empty
359 			 * slots and the new hash table starts out empty.
360 			 * This reproduces the original directory ordering
361 			 * (required for /proc directory semantics).
362 			 */
363 			old_dir = p->p_lwpdir;
364 			old_dirsz = p->p_lwpdir_sz;
365 			p->p_lwpdir = new_dir;
366 			p->p_lwpfree = new_dir;
367 			p->p_lwpdir_sz = new_dirsz;
368 			for (ldp = old_dir, i = 0; i < old_dirsz; i++, ldp++)
369 				lwp_hash_in(p, ldp->ld_entry,
370 				    new_hash, new_hashsz, 0);
371 
372 			/*
373 			 * Remember the old hash table along with all
374 			 * of the previously-remembered hash tables.
375 			 * We will free them at process exit or exec.
376 			 */
377 			ret_tidhash->rth_tidhash = old_hash;
378 			ret_tidhash->rth_tidhash_sz = old_hashsz;
379 			ret_tidhash->rth_next = p->p_ret_tidhash;
380 			p->p_ret_tidhash = ret_tidhash;
381 
382 			/*
383 			 * Now establish the new tid hash table.
384 			 * As soon as we assign p->p_tidhash,
385 			 * code in lwp_unpark() can start using it.
386 			 */
387 			membar_producer();
388 			p->p_tidhash = new_hash;
389 
390 			/*
391 			 * It is necessary that p_tidhash reach global
392 			 * visibility before p_tidhash_sz.  Otherwise,
393 			 * code in lwp_hash_lookup_and_lock() could
394 			 * index into the old p_tidhash using the new
395 			 * p_tidhash_sz and thereby access invalid data.
396 			 */
397 			membar_producer();
398 			p->p_tidhash_sz = new_hashsz;
399 
400 			/*
401 			 * Release the locks; allow lwp_unpark() to carry on.
402 			 */
403 			for (i = 0; i < old_hashsz; i++) {
404 				mutex_exit(&old_hash[i].th_lock);
405 				mutex_exit(&new_hash[i].th_lock);
406 			}
407 
408 			/*
409 			 * Avoid freeing these objects below.
410 			 */
411 			ret_tidhash = NULL;
412 			old_hash = NULL;
413 			old_hashsz = 0;
414 		}
415 	}
416 
417 	/*
418 	 * Block the process against /proc while we manipulate p->p_tlist,
419 	 * unless lwp_create() was called by /proc for the PCAGENT operation.
420 	 * We want to do this early enough so that we don't drop p->p_lock
421 	 * until the thread is put on the p->p_tlist.
422 	 */
423 	if (p == curproc) {
424 		prbarrier(p);
425 		/*
426 		 * If the current lwp has been requested to stop, do so now.
427 		 * Otherwise we have a race condition between /proc attempting
428 		 * to stop the process and this thread creating a new lwp
429 		 * that was not seen when the /proc PCSTOP request was issued.
430 		 * We rely on stop() to call prbarrier(p) before returning.
431 		 */
432 		while ((curthread->t_proc_flag & TP_PRSTOP) &&
433 		    !ttolwp(curthread)->lwp_nostop) {
434 			/*
435 			 * We called pool_barrier_enter() before calling
436 			 * here to lwp_create(). We have to call
437 			 * pool_barrier_exit() before stopping.
438 			 */
439 			pool_barrier_exit();
440 			prbarrier(p);
441 			stop(PR_REQUESTED, 0);
442 			/*
443 			 * And we have to repeat the call to
443 			 * pool_barrier_enter() after stopping.
445 			 */
446 			pool_barrier_enter();
447 			prbarrier(p);
448 		}
449 
450 		/*
451 		 * If the process is exiting, there could be a race between
452 		 * the creation of the agent lwp and the creation of this
453 		 * new lwp.  To prevent that race, we fail this lwp creation
454 		 * if the process is exiting.
455 		 */
456 		if (p->p_flag & (SEXITLWPS|SKILLED)) {
457 			err = 1;
458 			goto error;
459 		}
460 
461 		/*
462 		 * Since we might have dropped p->p_lock, the
463 		 * lwp directory free list might have changed.
464 		 */
465 		if (p->p_lwpfree == NULL)
466 			goto grow;
467 	}
468 
469 	kpreempt_disable();	/* can't grab cpu_lock here */
470 
471 	/*
472 	 * Inherit processor and processor set bindings from curthread.
473 	 *
474 	 * For kernel LWPs, we do not inherit processor set bindings at
475 	 * process creation time (i.e. when p != curproc).  After the
476 	 * kernel process is created, any subsequent LWPs must be created
477 	 * by threads in the kernel process, at which point we *will*
478 	 * inherit processor set bindings.
479 	 */
480 	if (CLASS_KERNEL(cid) && p != curproc) {
481 		t->t_bind_cpu = binding = PBIND_NONE;
482 		t->t_cpupart = oldpart = &cp_default;
483 		t->t_bind_pset = PS_NONE;
484 		t->t_bindflag = (uchar_t)default_binding_mode;
485 	} else {
486 		binding = curthread->t_bind_cpu;
487 		t->t_bind_cpu = binding;
488 		oldpart = t->t_cpupart;
489 		t->t_cpupart = curthread->t_cpupart;
490 		t->t_bind_pset = curthread->t_bind_pset;
491 		t->t_bindflag = curthread->t_bindflag |
492 		    (uchar_t)default_binding_mode;
493 	}
494 
495 	/*
496 	 * thread_create() initializes this thread's home lgroup to the root.
497 	 * Choose a more suitable lgroup, since this thread is associated
498 	 * with an lwp.
499 	 */
500 	ASSERT(oldpart != NULL);
501 	if (binding != PBIND_NONE && t->t_affinitycnt == 0) {
502 		t->t_bound_cpu = cpu[binding];
503 		if (t->t_lpl != t->t_bound_cpu->cpu_lpl)
504 			lgrp_move_thread(t, t->t_bound_cpu->cpu_lpl, 1);
505 	} else {
506 		lgrp_move_thread(t, lgrp_choose(t, t->t_cpupart), 1);
507 	}
508 
509 	kpreempt_enable();
510 
511 	/*
512 	 * make sure lpl points to our own partition
513 	 */
514 	ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads);
515 	ASSERT(t->t_lpl < t->t_cpupart->cp_lgrploads +
516 	    t->t_cpupart->cp_nlgrploads);
517 
518 	/*
519 	 * If we're creating a new process, then inherit the project from our
520 	 * parent. If we're only creating an additional lwp then use the
521 	 * project pointer of the target process.
522 	 */
523 	if (p->p_task == NULL)
524 		newkpj = ttoproj(curthread);
525 	else
526 		newkpj = p->p_task->tk_proj;
527 
528 	/*
529 	 * It is safe to point the thread to the new project without holding it
530 	 * since we're holding the target process' p_lock here and therefore
531 	 * we're guaranteed that it will not move to another project.
532 	 */
533 	oldkpj = ttoproj(t);
534 	if (newkpj != oldkpj) {
535 		t->t_proj = newkpj;
536 		(void) project_hold(newkpj);
537 		project_rele(oldkpj);
538 	}
539 
540 	if (cid != NOCLASS) {
541 		/*
542 		 * If the lwp is being created in the current process
543 		 * and matches the current thread's scheduling class,
544 		 * we should propagate the current thread's scheduling
545 		 * parameters by calling CL_FORK.  Otherwise just use
546 		 * the defaults by calling CL_ENTERCLASS.
547 		 */
548 		if (p != curproc || curthread->t_cid != cid) {
549 			err = CL_ENTERCLASS(t, cid, NULL, NULL, bufp);
550 			t->t_pri = pri;	/* CL_ENTERCLASS may have changed it */
551 			/*
552 			 * We don't call schedctl_set_cidpri(t) here
553 			 * because the schedctl data is not yet set
554 			 * up for the newly-created lwp.
555 			 */
556 		} else {
557 			t->t_clfuncs = &(sclass[cid].cl_funcs->thread);
558 			err = CL_FORK(curthread, t, bufp);
559 			t->t_cid = cid;
560 		}
561 		if (err)
562 			goto error;
563 		else
564 			bufp = NULL;
565 	}
566 
567 	/*
568 	 * If we were given an lwpid then use it, else allocate one.
569 	 */
570 	if (lwpid != 0)
571 		t->t_tid = lwpid;
572 	else {
573 		/*
574 		 * lwp/thread id 0 is never valid; reserved for special checks.
575 		 * lwp/thread id 1 is reserved for the main thread.
576 		 * Start again at 2 when INT_MAX has been reached
577 		 * (id_t is a signed 32-bit integer).
578 		 */
579 		id_t prev_id = p->p_lwpid;	/* last allocated tid */
580 
581 		do {			/* avoid lwpid duplication */
582 			if (p->p_lwpid == INT_MAX) {
583 				p->p_flag |= SLWPWRAP;
584 				p->p_lwpid = 1;
585 			}
586 			if ((t->t_tid = ++p->p_lwpid) == prev_id) {
587 				/*
588 				 * All lwpids are allocated; fail the request.
589 				 */
590 				err = 1;
591 				goto error;
592 			}
593 			/*
594 			 * We only need to worry about colliding with an id
595 			 * that's already in use if this process has
596 			 * cycled through all available lwp ids.
597 			 */
598 			if ((p->p_flag & SLWPWRAP) == 0)
599 				break;
600 		} while (lwp_hash_lookup(p, t->t_tid) != NULL);
601 	}
602 
603 	/*
604 	 * If this is a branded process, let the brand do any necessary lwp
605 	 * initialization.
606 	 */
607 	if (PROC_IS_BRANDED(p)) {
608 		if (BROP(p)->b_initlwp(lwp)) {
609 			err = 1;
610 			goto error;
611 		}
612 		branded = 1;
613 	}
614 
615 	if (t->t_tid == 1) {
616 		kpreempt_disable();
617 		ASSERT(t->t_lpl != NULL);
618 		p->p_t1_lgrpid = t->t_lpl->lpl_lgrpid;
619 		kpreempt_enable();
620 		if (p->p_tr_lgrpid != LGRP_NONE &&
621 		    p->p_tr_lgrpid != p->p_t1_lgrpid) {
622 			lgrp_update_trthr_migrations(1);
623 		}
624 	}
625 
626 	p->p_lwpcnt++;
627 	t->t_waitfor = -1;
628 
629 	/*
630 	 * Turn microstate accounting on for thread if on for process.
631 	 */
632 	if (p->p_flag & SMSACCT)
633 		t->t_proc_flag |= TP_MSACCT;
634 
635 	/*
636 	 * If the process has watchpoints, mark the new thread as such.
637 	 */
638 	if (pr_watch_active(p))
639 		watch_enable(t);
640 
641 	/*
642 	 * The lwp is being created in the stopped state.
643 	 * We set all the necessary flags to indicate that fact here.
644 	 * We omit the TS_CREATE flag from t_schedflag so that the lwp
645 	 * cannot be set running until the caller is finished with it,
646 	 * even if lwp_continue() is called on it after we drop p->p_lock.
647 	 * When the caller is finished with the newly-created lwp,
648 	 * the caller must call lwp_create_done() to allow the lwp
649 	 * to be set running.  If the TP_HOLDLWP is left set, the
650 	 * lwp will suspend itself after reaching system call exit.
651 	 */
652 	init_mstate(t, LMS_STOPPED);
653 	t->t_proc_flag |= TP_HOLDLWP;
654 	t->t_schedflag |= (TS_ALLSTART & ~(TS_CSTART | TS_CREATE));
655 	t->t_whystop = PR_SUSPENDED;
656 	t->t_whatstop = SUSPEND_NORMAL;
657 	t->t_sig_check = 1;	/* ensure that TP_HOLDLWP is honored */
658 
659 	/*
660 	 * Set system call processing flags in case tracing or profiling
661 	 * is set.  The first system call will evaluate these and turn
662 	 * them off if they aren't needed.
663 	 */
664 	t->t_pre_sys = 1;
665 	t->t_post_sys = 1;
666 
667 	/*
668 	 * Insert the new thread into the list of all threads.
669 	 */
670 	if ((tx = p->p_tlist) == NULL) {
671 		t->t_back = t;
672 		t->t_forw = t;
673 		p->p_tlist = t;
674 	} else {
675 		t->t_forw = tx;
676 		t->t_back = tx->t_back;
677 		tx->t_back->t_forw = t;
678 		tx->t_back = t;
679 	}
680 
681 	/*
682 	 * Insert the new lwp into an lwp directory slot position
683 	 * and into the lwpid hash table.
684 	 */
685 	lep->le_thread = t;
686 	lep->le_lwpid = t->t_tid;
687 	lep->le_start = t->t_start;
688 	lwp_hash_in(p, lep, p->p_tidhash, p->p_tidhash_sz, 1);
689 
690 	if (state == TS_RUN) {
691 		/*
692 		 * We set the new lwp running immediately.
693 		 */
694 		t->t_proc_flag &= ~TP_HOLDLWP;
695 		lwp_create_done(t);
696 	}
697 
698 error:
699 	if (err) {
700 		if (CLASS_KERNEL(cid)) {
701 			/*
702 			 * This should only happen if a system process runs
703 			 * out of lwpids, which shouldn't occur.
704 			 */
705 			panic("Failed to create a system LWP");
706 		}
707 		/*
708 		 * We have failed to create an lwp, so decrement the number
709 		 * of lwps in the task and let the lgroup load averages know
710 		 * that this thread isn't going to show up.
711 		 */
712 		kpreempt_disable();
713 		lgrp_move_thread(t, NULL, 1);
714 		kpreempt_enable();
715 
716 		ASSERT(MUTEX_HELD(&p->p_lock));
717 		mutex_enter(&p->p_zone->zone_nlwps_lock);
718 		p->p_task->tk_nlwps--;
719 		p->p_task->tk_proj->kpj_nlwps--;
720 		p->p_zone->zone_nlwps--;
721 		mutex_exit(&p->p_zone->zone_nlwps_lock);
722 		if (cid != NOCLASS && bufp != NULL)
723 			CL_FREE(cid, bufp);
724 
725 		if (branded)
726 			BROP(p)->b_freelwp(lwp);
727 
728 		mutex_exit(&p->p_lock);
729 		t->t_state = TS_FREE;
730 		thread_rele(t);
731 
732 		/*
733 		 * We need to remove t from the list of all threads
734 		 * because thread_exit()/lwp_exit() isn't called on t.
735 		 */
736 		mutex_enter(&pidlock);
737 		ASSERT(t != t->t_next);		/* t0 never exits */
738 		t->t_next->t_prev = t->t_prev;
739 		t->t_prev->t_next = t->t_next;
740 		mutex_exit(&pidlock);
741 
742 		thread_free(t);
743 		kmem_free(lep, sizeof (*lep));
744 		lwp = NULL;
745 	} else {
746 		mutex_exit(&p->p_lock);
747 	}
748 
749 	if (old_dir != NULL)
750 		kmem_free(old_dir, old_dirsz * sizeof (*old_dir));
751 	if (old_hash != NULL)
752 		kmem_free(old_hash, old_hashsz * sizeof (*old_hash));
753 	if (ret_tidhash != NULL)
754 		kmem_free(ret_tidhash, sizeof (ret_tidhash_t));
755 
756 	DTRACE_PROC1(lwp__create, kthread_t *, t);
757 	return (lwp);
758 }
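
/*
 * Illustrative caller sketch (hypothetical, not from the original source):
 * create an lwp stopped, finish setting it up, then let it run.  Note that
 * lwp_create() returns with p->p_lock dropped, while lwp_create_done()
 * asserts that p->p_lock is held.
 *
 *	klwp_t *lwp = lwp_create(start_func, arg, 0, p, TS_STOPPED,
 *	    pri, &curthread->t_hold, NOCLASS, 0);
 *	if (lwp == NULL)
 *		return (EAGAIN);
 *	... finish manipulating the new lwp ...
 *	mutex_enter(&p->p_lock);
 *	lwp_create_done(lwptot(lwp));
 *	mutex_exit(&p->p_lock);
 */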
759 
760 /*
761  * lwp_create_done() is called by the caller of lwp_create() to set the
762  * newly-created lwp running after the caller has finished manipulating it.
763  */
764 void
765 lwp_create_done(kthread_t *t)
766 {
767 	proc_t *p = ttoproc(t);
768 
769 	ASSERT(MUTEX_HELD(&p->p_lock));
770 
771 	/*
772 	 * We set the TS_CREATE and TS_CSTART flags and call setrun_locked().
773 	 * (The absence of the TS_CREATE flag prevents the lwp from running
774 	 * until we are finished with it, even if lwp_continue() is called on
775 	 * it by some other lwp in the process or elsewhere in the kernel.)
776 	 */
777 	thread_lock(t);
778 	ASSERT(t->t_state == TS_STOPPED && !(t->t_schedflag & TS_CREATE));
779 	/*
780 	 * If TS_CSTART is set, lwp_continue(t) has been called and
781 	 * has already incremented p_lwprcnt; avoid doing this twice.
782 	 */
783 	if (!(t->t_schedflag & TS_CSTART))
784 		p->p_lwprcnt++;
785 	t->t_schedflag |= (TS_CSTART | TS_CREATE);
786 	setrun_locked(t);
787 	thread_unlock(t);
788 }
789 
790 /*
791  * Copy an LWP's active templates, and clear the latest contracts.
792  */
793 void
794 lwp_ctmpl_copy(klwp_t *dst, klwp_t *src)
795 {
796 	int i;
797 
798 	for (i = 0; i < ct_ntypes; i++) {
799 		dst->lwp_ct_active[i] = ctmpl_dup(src->lwp_ct_active[i]);
800 		dst->lwp_ct_latest[i] = NULL;
801 	}
802 }
803 
804 /*
805  * Clear an LWP's contract template state.
806  */
807 void
808 lwp_ctmpl_clear(klwp_t *lwp)
809 {
810 	ct_template_t *tmpl;
811 	int i;
812 
813 	for (i = 0; i < ct_ntypes; i++) {
814 		if ((tmpl = lwp->lwp_ct_active[i]) != NULL) {
815 			ctmpl_free(tmpl);
816 			lwp->lwp_ct_active[i] = NULL;
817 		}
818 
819 		if (lwp->lwp_ct_latest[i] != NULL) {
820 			contract_rele(lwp->lwp_ct_latest[i]);
821 			lwp->lwp_ct_latest[i] = NULL;
822 		}
823 	}
824 }
825 
826 /*
827  * Individual lwp exit.
828  * If this is the last lwp, exit the whole process.
829  */
830 void
831 lwp_exit(void)
832 {
833 	kthread_t *t = curthread;
834 	klwp_t *lwp = ttolwp(t);
835 	proc_t *p = ttoproc(t);
836 
837 	ASSERT(MUTEX_HELD(&p->p_lock));
838 
839 	mutex_exit(&p->p_lock);
840 
841 #if defined(__sparc)
842 	/*
843 	 * Ensure that the user stack is fully abandoned.
844 	 */
845 	trash_user_windows();
846 #endif
847 
848 	tsd_exit();			/* free thread specific data */
849 
850 	kcpc_passivate();		/* Clean up performance counter state */
851 
852 	pollcleanup();
853 
854 	if (t->t_door)
855 		door_slam();
856 
857 	if (t->t_schedctl != NULL)
858 		schedctl_lwp_cleanup(t);
859 
860 	if (t->t_upimutex != NULL)
861 		upimutex_cleanup();
862 
863 	/*
864 	 * Perform any brand-specific exit processing, then release any
865 	 * brand data associated with the lwp.
866 	 */
867 	if (PROC_IS_BRANDED(p))
868 		BROP(p)->b_lwpexit(lwp);
869 
870 	mutex_enter(&p->p_lock);
871 	lwp_cleanup();
872 
873 	/*
874 	 * When this process is dumping core, its lwps are held here
875 	 * until the core dump is finished. Then exitlwps() is called
876 	 * again to release these lwps so that they can finish exiting.
877 	 */
878 	if (p->p_flag & SCOREDUMP)
879 		stop(PR_SUSPENDED, SUSPEND_NORMAL);
880 
881 	/*
882 	 * Block the process against /proc now that we have really acquired
883 	 * p->p_lock (to decrement p_lwpcnt and manipulate p_tlist at least).
884 	 */
885 	prbarrier(p);
886 
887 	/*
888 	 * Call proc_exit() if this is the last non-daemon lwp in the process.
889 	 */
890 	if (!(t->t_proc_flag & TP_DAEMON) &&
891 	    p->p_lwpcnt == p->p_lwpdaemon + 1) {
892 		mutex_exit(&p->p_lock);
893 		if (proc_exit(CLD_EXITED, 0) == 0) {
894 			/* Restarting init. */
895 			return;
896 		}
897 
898 		/*
899 		 * proc_exit() returns a non-zero value when some other
900 		 * lwp got there first.  We just have to continue in
901 		 * lwp_exit().
902 		 */
903 		mutex_enter(&p->p_lock);
904 		ASSERT(curproc->p_flag & SEXITLWPS);
905 		prbarrier(p);
906 	}
907 
908 	DTRACE_PROC(lwp__exit);
909 
910 	/*
911 	 * If the lwp is a detached lwp or if the process is exiting,
912 	 * remove (lwp_hash_out()) the lwp from the lwp directory.
913 	 * Otherwise null out the lwp's le_thread pointer in the lwp
914 	 * directory so that other threads will see it as a zombie lwp.
915 	 */
916 	prlwpexit(t);		/* notify /proc */
917 	if (!(t->t_proc_flag & TP_TWAIT) || (p->p_flag & SEXITLWPS))
918 		lwp_hash_out(p, t->t_tid);
919 	else {
920 		ASSERT(!(t->t_proc_flag & TP_DAEMON));
921 		p->p_lwpdir[t->t_dslot].ld_entry->le_thread = NULL;
922 		p->p_zombcnt++;
923 		cv_broadcast(&p->p_lwpexit);
924 	}
925 	if (t->t_proc_flag & TP_DAEMON) {
926 		p->p_lwpdaemon--;
927 		t->t_proc_flag &= ~TP_DAEMON;
928 	}
929 	t->t_proc_flag &= ~TP_TWAIT;
930 
931 	/*
932 	 * Maintain accurate lwp count for task.max-lwps resource control.
933 	 */
934 	mutex_enter(&p->p_zone->zone_nlwps_lock);
935 	p->p_task->tk_nlwps--;
936 	p->p_task->tk_proj->kpj_nlwps--;
937 	p->p_zone->zone_nlwps--;
938 	mutex_exit(&p->p_zone->zone_nlwps_lock);
939 
940 	CL_EXIT(t);		/* tell the scheduler that t is exiting */
941 	ASSERT(p->p_lwpcnt != 0);
942 	p->p_lwpcnt--;
943 
944 	/*
945 	 * If all remaining non-daemon lwps are waiting in lwp_wait(),
946 	 * wake them up so someone can return EDEADLK.
947 	 * (See the block comment preceding lwp_wait().)
948 	 */
949 	if (p->p_lwpcnt == p->p_lwpdaemon + (p->p_lwpwait - p->p_lwpdwait))
950 		cv_broadcast(&p->p_lwpexit);
951 
952 	t->t_proc_flag |= TP_LWPEXIT;
953 	term_mstate(t);
954 
955 #ifndef NPROBE
956 	/* Kernel probe */
957 	if (t->t_tnf_tpdp)
958 		tnf_thread_exit();
959 #endif /* NPROBE */
960 
961 	t->t_forw->t_back = t->t_back;
962 	t->t_back->t_forw = t->t_forw;
963 	if (t == p->p_tlist)
964 		p->p_tlist = t->t_forw;
965 
966 	/*
967 	 * Clean up the signal state.
968 	 */
969 	if (t->t_sigqueue != NULL)
970 		sigdelq(p, t, 0);
971 	if (lwp->lwp_curinfo != NULL) {
972 		siginfofree(lwp->lwp_curinfo);
973 		lwp->lwp_curinfo = NULL;
974 	}
975 
976 	thread_rele(t);
977 
978 	/*
979 	 * Terminated lwps are associated with process zero and are put onto
980 	 * death-row by resume().  Avoid preemption after resetting t->t_procp.
981 	 */
982 	t->t_preempt++;
983 
984 	if (t->t_ctx != NULL)
985 		exitctx(t);
986 	if (p->p_pctx != NULL)
987 		exitpctx(p);
988 
989 	t->t_procp = &p0;
990 
991 	/*
992 	 * Notify the HAT about the change of address space
993 	 */
994 	hat_thread_exit(t);
995 	/*
996 	 * When this is the last running lwp in this process, or when this
997 	 * thread was being suspended, some lwp may be waiting for that
998 	 * condition to become true; if so, the waiting lwp is awakened.
999 	 *
1000 	 * Also, if the process is exiting, we may have a thread waiting in
1001 	 * exitlwps() that needs to be notified.
1002 	 */
1003 	if (--p->p_lwprcnt == 0 || (t->t_proc_flag & TP_HOLDLWP) ||
1004 	    (p->p_flag & SEXITLWPS))
1005 		cv_broadcast(&p->p_holdlwps);
1006 
1007 	/*
1008 	 * Need to drop p_lock so we can reacquire pidlock.
1009 	 */
1010 	mutex_exit(&p->p_lock);
1011 	mutex_enter(&pidlock);
1012 
1013 	ASSERT(t != t->t_next);		/* t0 never exits */
1014 	t->t_next->t_prev = t->t_prev;
1015 	t->t_prev->t_next = t->t_next;
1016 	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
1017 	mutex_exit(&pidlock);
1018 
1019 	lwp_pcb_exit();
1020 
1021 	t->t_state = TS_ZOMB;
1022 	swtch_from_zombie();
1023 	/* never returns */
1024 }
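
/*
 * Illustrative call pattern (editorial note): lwp_exit() must be entered
 * with the process's p_lock held and does not return.
 *
 *	mutex_enter(&curproc->p_lock);
 *	lwp_exit();
 *	(not reached)
 */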
1025 
1026 
1027 /*
1028  * Cleanup function for an exiting lwp.
1029  * Called both from lwp_exit() and from proc_exit().
1030  * p->p_lock is repeatedly released and grabbed in this function.
1031  */
1032 void
1033 lwp_cleanup(void)
1034 {
1035 	kthread_t *t = curthread;
1036 	proc_t *p = ttoproc(t);
1037 
1038 	ASSERT(MUTEX_HELD(&p->p_lock));
1039 
1040 	/* untimeout any lwp-bound realtime timers */
1041 	if (p->p_itimer != NULL)
1042 		timer_lwpexit();
1043 
1044 	/*
1045 	 * If this is the /proc agent lwp that is exiting, readjust p_lwpid
1046 	 * so it appears that the agent never existed, and clear p_agenttp.
1047 	 */
1048 	if (t == p->p_agenttp) {
1049 		ASSERT(t->t_tid == p->p_lwpid);
1050 		p->p_lwpid--;
1051 		p->p_agenttp = NULL;
1052 	}
1053 
1054 	/*
1055 	 * Do lgroup bookkeeping to account for thread exiting.
1056 	 */
1057 	kpreempt_disable();
1058 	lgrp_move_thread(t, NULL, 1);
1059 	if (t->t_tid == 1) {
1060 		p->p_t1_lgrpid = LGRP_NONE;
1061 	}
1062 	kpreempt_enable();
1063 
1064 	lwp_ctmpl_clear(ttolwp(t));
1065 }
1066 
1067 int
1068 lwp_suspend(kthread_t *t)
1069 {
1070 	int tid;
1071 	proc_t *p = ttoproc(t);
1072 
1073 	ASSERT(MUTEX_HELD(&p->p_lock));
1074 
1075 	/*
1076 	 * Set the thread's TP_HOLDLWP flag so it will stop in holdlwp().
1077 	 * If an lwp is stopping itself, there is no need to wait.
1078 	 */
1079 top:
1080 	t->t_proc_flag |= TP_HOLDLWP;
1081 	if (t == curthread) {
1082 		t->t_sig_check = 1;
1083 	} else {
1084 		/*
1085 		 * Make sure the lwp stops promptly.
1086 		 */
1087 		thread_lock(t);
1088 		t->t_sig_check = 1;
1089 		/*
1090 		 * XXX Should use virtual stop like /proc does instead of
1091 		 * XXX waking the thread to get it to stop.
1092 		 */
1093 		if (ISWAKEABLE(t) || ISWAITING(t)) {
1094 			setrun_locked(t);
1095 		} else if (t->t_state == TS_ONPROC && t->t_cpu != CPU) {
1096 			poke_cpu(t->t_cpu->cpu_id);
1097 		}
1098 
1099 		tid = t->t_tid;	 /* remember thread ID */
1100 		/*
1101 		 * Wait for lwp to stop
1102 		 */
1103 		while (!SUSPENDED(t)) {
1104 			/*
1105 			 * Drop the thread lock before waiting and reacquire it
1106 			 * afterwards, so the thread can change its t_state
1107 			 * field.
1108 			 */
1109 			thread_unlock(t);
1110 
1111 			/*
1112 			 * Check if aborted by exitlwps().
1113 			 */
1114 			if (p->p_flag & SEXITLWPS)
1115 				lwp_exit();
1116 
1117 			/*
1118 			 * Cooperate with jobcontrol signals and /proc stopping
1119 			 * by calling cv_wait_sig() to wait for the target
1120 			 * lwp to stop.  Just using cv_wait() can lead to
1121 			 * deadlock: if some other lwp has stopped by either
1122 			 * of these mechanisms, then p_lwprcnt will never
1123 			 * become zero and a plain cv_wait() would block forever.
1124 			 */
1125 			if (!cv_wait_sig(&p->p_holdlwps, &p->p_lock))
1126 				return (EINTR);
1127 
1128 			/*
1129 			 * Check to see if thread died while we were
1130 			 * waiting for it to suspend.
1131 			 */
1132 			if (idtot(p, tid) == NULL)
1133 				return (ESRCH);
1134 
1135 			thread_lock(t);
1136 			/*
1137 			 * If the TP_HOLDLWP flag went away, lwp_continue()
1138 			 * or vfork() must have been called while we were
1139 			 * waiting, so start over again.
1140 			 */
1141 			if ((t->t_proc_flag & TP_HOLDLWP) == 0) {
1142 				thread_unlock(t);
1143 				goto top;
1144 			}
1145 		}
1146 		thread_unlock(t);
1147 	}
1148 	return (0);
1149 }
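
/*
 * Illustrative pairing (hypothetical): suspend a target lwp and later
 * resume it.  Both calls require the target process's p_lock.
 *
 *	mutex_enter(&p->p_lock);
 *	error = lwp_suspend(t);
 *	mutex_exit(&p->p_lock);
 *	...
 *	mutex_enter(&p->p_lock);
 *	lwp_continue(t);
 *	mutex_exit(&p->p_lock);
 */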
1150 
1151 /*
1152  * continue an lwp that's been stopped by lwp_suspend().
1153  */
1154 void
1155 lwp_continue(kthread_t *t)
1156 {
1157 	proc_t *p = ttoproc(t);
1158 	int was_suspended = t->t_proc_flag & TP_HOLDLWP;
1159 
1160 	ASSERT(MUTEX_HELD(&p->p_lock));
1161 
1162 	t->t_proc_flag &= ~TP_HOLDLWP;
1163 	thread_lock(t);
1164 	if (SUSPENDED(t) &&
1165 	    !(p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH))) {
1166 		p->p_lwprcnt++;
1167 		t->t_schedflag |= TS_CSTART;
1168 		setrun_locked(t);
1169 	}
1170 	thread_unlock(t);
1171 	/*
1172 	 * Wakeup anyone waiting for this thread to be suspended
1173 	 */
1174 	if (was_suspended)
1175 		cv_broadcast(&p->p_holdlwps);
1176 }
1177 
1178 /*
1179  * ********************************
1180  *  Miscellaneous lwp routines	  *
1181  * ********************************
1182  */
1183 /*
1184  * When a process is undergoing a forkall(), its p_flag is set to SHOLDFORK.
1185  * This will cause the process's lwps to stop at a hold point.  A hold
1186  * point is where a kernel thread has a flat stack.  This is at the
1187  * return from a system call and at the return from a user level trap.
1188  *
1189  * When a process is undergoing a fork1() or vfork(), its p_flag is set to
1190  * SHOLDFORK1.  This will cause the process's lwps to stop at a modified
1191  * hold point.  The lwps in the process are not being cloned, so they
1192  * are held at the usual hold points and also within issig_forreal().
1193  * This has the side-effect that their system calls do not return
1194  * EINTR.
1195  *
1196  * An lwp can also be held.  This is identified by the TP_HOLDLWP flag on
1197  * the thread.  The TP_HOLDLWP flag is set in lwp_suspend(), where the active
1198  * lwp is waiting for the target lwp to be stopped.
1199  */
1200 void
1201 holdlwp(void)
1202 {
1203 	proc_t *p = curproc;
1204 	kthread_t *t = curthread;
1205 
1206 	mutex_enter(&p->p_lock);
1207 	/*
1208 	 * Don't terminate immediately if the process is dumping core.
1209 	 * Once the process has dumped core, all lwps are terminated.
1210 	 */
1211 	if (!(p->p_flag & SCOREDUMP)) {
1212 		if ((p->p_flag & SEXITLWPS) || (t->t_proc_flag & TP_EXITLWP))
1213 			lwp_exit();
1214 	}
1215 	if (!(ISHOLD(p)) && !(p->p_flag & (SHOLDFORK1 | SHOLDWATCH))) {
1216 		mutex_exit(&p->p_lock);
1217 		return;
1218 	}
1219 	/*
1220 	 * stop() decrements p->p_lwprcnt and cv_signal()s &p->p_holdlwps
1221 	 * when p->p_lwprcnt becomes zero.
1222 	 */
1223 	stop(PR_SUSPENDED, SUSPEND_NORMAL);
1224 	if (p->p_flag & SEXITLWPS)
1225 		lwp_exit();
1226 	mutex_exit(&p->p_lock);
1227 }
1228 
1229 /*
1230  * Have all lwps within the process hold at a point where they are
1231  * cloneable (SHOLDFORK) or just safe w.r.t. fork1 (SHOLDFORK1).
1232  */
1233 int
1234 holdlwps(int holdflag)
1235 {
1236 	proc_t *p = curproc;
1237 
1238 	ASSERT(holdflag == SHOLDFORK || holdflag == SHOLDFORK1);
1239 	mutex_enter(&p->p_lock);
1240 	schedctl_finish_sigblock(curthread);
1241 again:
1242 	while (p->p_flag & (SEXITLWPS | SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
1243 		/*
1244 		 * If another lwp is doing a forkall() or proc_exit(), bail out.
1245 		 */
1246 		if (p->p_flag & (SEXITLWPS | SHOLDFORK)) {
1247 			mutex_exit(&p->p_lock);
1248 			return (0);
1249 		}
1250 		/*
1251 		 * Another lwp is doing a fork1() or is undergoing
1252 		 * watchpoint activity.  We hold here for it to complete.
1253 		 */
1254 		stop(PR_SUSPENDED, SUSPEND_NORMAL);
1255 	}
1256 	p->p_flag |= holdflag;
1257 	pokelwps(p);
1258 	--p->p_lwprcnt;
1259 	/*
1260 	 * Wait for the process to become quiescent (p->p_lwprcnt == 0).
1261 	 */
1262 	while (p->p_lwprcnt > 0) {
1263 		/*
1264 		 * Check if aborted by exitlwps().
1265 		 * Also check if SHOLDWATCH is set; it takes precedence.
1266 		 */
1267 		if (p->p_flag & (SEXITLWPS | SHOLDWATCH)) {
1268 			p->p_lwprcnt++;
1269 			p->p_flag &= ~holdflag;
1270 			cv_broadcast(&p->p_holdlwps);
1271 			goto again;
1272 		}
1273 		/*
1274 		 * Cooperate with jobcontrol signals and /proc stopping.
1275 		 * If some other lwp has stopped by either of these
1276 		 * mechanisms, then p_lwprcnt will never become zero
1277 		 * and the process will appear deadlocked unless we
1278 		 * stop here in sympathy with the other lwp before
1279 		 * doing the cv_wait() below.
1280 		 *
1281 		 * If the other lwp stops after we do the cv_wait(), it
1282 		 * will wake us up to loop around and do the sympathy stop.
1283 		 *
1284 		 * Since stop() drops p->p_lock, we must start from
1285 		 * the top again on returning from stop().
1286 		 */
1287 		if (p->p_stopsig | (curthread->t_proc_flag & TP_PRSTOP)) {
1288 			int whystop = p->p_stopsig? PR_JOBCONTROL :
1289 			    PR_REQUESTED;
1290 			p->p_lwprcnt++;
1291 			p->p_flag &= ~holdflag;
1292 			stop(whystop, p->p_stopsig);
1293 			goto again;
1294 		}
1295 		cv_wait(&p->p_holdlwps, &p->p_lock);
1296 	}
1297 	p->p_lwprcnt++;
1298 	p->p_flag &= ~holdflag;
1299 	mutex_exit(&p->p_lock);
1300 	return (1);
1301 }
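
/*
 * Illustrative sketch (hypothetical): a fork1()-style caller quiesces the
 * other lwps, does its work, then releases them.  A zero return means an
 * exitlwps() or forkall() won the race and the caller must back out.
 *
 *	if (holdlwps(SHOLDFORK1) == 0)
 *		return (EINTR);		(caller-specific failure)
 *	... perform the fork1() work ...
 *	mutex_enter(&p->p_lock);
 *	continuelwps(p);
 *	mutex_exit(&p->p_lock);
 */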
1302 
1303 /*
1304  * See comments for holdwatch(), below.
1305  */
1306 static int
1307 holdcheck(int clearflags)
1308 {
1309 	proc_t *p = curproc;
1310 
1311 	/*
1312 	 * If we are trying to exit, that takes precedence over anything else.
1313 	 */
1314 	if (p->p_flag & SEXITLWPS) {
1315 		p->p_lwprcnt++;
1316 		p->p_flag &= ~clearflags;
1317 		lwp_exit();
1318 	}
1319 
1320 	/*
1321 	 * If another thread is calling fork1(), stop the current thread so the
1322 	 * other can complete.
1323 	 */
1324 	if (p->p_flag & SHOLDFORK1) {
1325 		p->p_lwprcnt++;
1326 		stop(PR_SUSPENDED, SUSPEND_NORMAL);
1327 		if (p->p_flag & SEXITLWPS) {
1328 			p->p_flag &= ~clearflags;
1329 			lwp_exit();
1330 		}
1331 		return (-1);
1332 	}
1333 
1334 	/*
1335 	 * If another thread is calling fork(), then indicate we are doing
1336 	 * watchpoint activity.  This will cause holdlwps() above to stop the
1337 	 * forking thread, at which point we can continue with watchpoint
1338 	 * activity.
1339 	 */
1340 	if (p->p_flag & SHOLDFORK) {
1341 		p->p_lwprcnt++;
1342 		while (p->p_flag & SHOLDFORK) {
1343 			p->p_flag |= SHOLDWATCH;
1344 			cv_broadcast(&p->p_holdlwps);
1345 			cv_wait(&p->p_holdlwps, &p->p_lock);
1346 			p->p_flag &= ~SHOLDWATCH;
1347 		}
1348 		return (-1);
1349 	}
1350 
1351 	return (0);
1352 }
1353 
1354 /*
1355  * Stop all lwps within the process, holding themselves in the kernel while the
1356  * active lwp undergoes watchpoint activity.  This is more complicated than
1357  * expected because stop() relies on calling holdwatch() in order to copyin data
1358  * from the user's address space.  A double barrier is used to prevent an
1359  * infinite loop.
1360  *
1361  * 	o The first thread into holdwatch() is the 'master' thread and does
1362  *        the following:
1363  *
1364  *              - Sets SHOLDWATCH on the current process
1365  *              - Sets TP_WATCHSTOP on the current thread
1366  *              - Waits for all threads to be either stopped or have
1367  *                TP_WATCHSTOP set.
1368  *              - Sets the SWATCHOK flag on the process
1369  *              - Unsets TP_WATCHSTOP
1370  *              - Waits for the other threads to completely stop
1371  *              - Unsets SWATCHOK
1372  *
1373  * 	o If SHOLDWATCH is already set when we enter this function, then another
1374  *        thread is already trying to stop this thread.  This 'slave' thread
1375  *        does the following:
1376  *
1377  *              - Sets TP_WATCHSTOP on the current thread
1378  *              - Waits for SWATCHOK flag to be set
1379  *              - Calls stop()
1380  *
1381  * 	o If SWATCHOK is set on the process, then this function immediately
1382  *        returns, as we must have been called via stop().
1383  *
1384  * In addition, there are other flags that take precedence over SHOLDWATCH:
1385  *
1386  * 	o If SEXITLWPS is set, exit immediately.
1387  *
1388  * 	o If SHOLDFORK1 is set, wait for fork1() to complete.
1389  *
1390  * 	o If SHOLDFORK is set, then watchpoint activity takes precedence.  In this
1391  *        case, set SHOLDWATCH, signalling the forking thread to stop first.
1392  *
1393  * 	o If the process is being stopped via /proc (TP_PRSTOP is set), then we
1394  *        stop the current thread.
1395  *
1396  * Returns 0 if all threads have been quiesced.  Returns non-zero if not all
1397  * threads were stopped, or the list of watched pages has changed.
1398  */
1399 int
1400 holdwatch(void)
1401 {
1402 	proc_t *p = curproc;
1403 	kthread_t *t = curthread;
1404 	int ret = 0;
1405 
1406 	mutex_enter(&p->p_lock);
1407 
1408 	p->p_lwprcnt--;
1409 
1410 	/*
1411 	 * Check for bail-out conditions as outlined above.
1412 	 */
1413 	if (holdcheck(0) != 0) {
1414 		mutex_exit(&p->p_lock);
1415 		return (-1);
1416 	}
1417 
1418 	if (!(p->p_flag & SHOLDWATCH)) {
1419 		/*
1420 		 * We are the master watchpoint thread.  Set SHOLDWATCH and poke
1421 		 * the other threads.
1422 		 */
1423 		p->p_flag |= SHOLDWATCH;
1424 		pokelwps(p);
1425 
1426 		/*
1427 		 * Wait for all threads to be stopped or have TP_WATCHSTOP set.
1428 		 */
1429 		while (pr_allstopped(p, 1) > 0) {
1430 			if (holdcheck(SHOLDWATCH) != 0) {
1431 				p->p_flag &= ~SHOLDWATCH;
1432 				mutex_exit(&p->p_lock);
1433 				return (-1);
1434 			}
1435 
1436 			cv_wait(&p->p_holdlwps, &p->p_lock);
1437 		}
1438 
1439 		/*
1440 		 * All threads are now stopped or in the process of stopping.
1441 		 * Set SWATCHOK and let them stop completely.
1442 		 */
1443 		p->p_flag |= SWATCHOK;
1444 		t->t_proc_flag &= ~TP_WATCHSTOP;
1445 		cv_broadcast(&p->p_holdlwps);
1446 
1447 		while (pr_allstopped(p, 0) > 0) {
1448 			/*
1449 			 * At first glance, it may appear that we don't need a
1450 			 * call to holdcheck() here.  But if the process gets a
1451 			 * SIGKILL signal, one of our stopped threads may have
1452 			 * been awakened and is waiting in exitlwps(), which
1453 			 * takes precedence over watchpoints.
1454 			 */
1455 			if (holdcheck(SHOLDWATCH | SWATCHOK) != 0) {
1456 				p->p_flag &= ~(SHOLDWATCH | SWATCHOK);
1457 				mutex_exit(&p->p_lock);
1458 				return (-1);
1459 			}
1460 
1461 			cv_wait(&p->p_holdlwps, &p->p_lock);
1462 		}
1463 
1464 		/*
1465 		 * All threads are now completely stopped.
1466 		 */
1467 		p->p_flag &= ~SWATCHOK;
1468 		p->p_flag &= ~SHOLDWATCH;
1469 		p->p_lwprcnt++;
1470 
1471 	} else if (!(p->p_flag & SWATCHOK)) {
1472 
1473 		/*
1474 		 * SHOLDWATCH is set, so another thread is trying to do
1475 		 * watchpoint activity.  Indicate this thread is stopping, and
1476 		 * wait for the OK from the master thread.
1477 		 */
1478 		t->t_proc_flag |= TP_WATCHSTOP;
1479 		cv_broadcast(&p->p_holdlwps);
1480 
1481 		while (!(p->p_flag & SWATCHOK)) {
1482 			if (holdcheck(0) != 0) {
1483 				t->t_proc_flag &= ~TP_WATCHSTOP;
1484 				mutex_exit(&p->p_lock);
1485 				return (-1);
1486 			}
1487 
1488 			cv_wait(&p->p_holdlwps, &p->p_lock);
1489 		}
1490 
1491 		/*
1492 		 * Once the master thread has given the OK, this thread can
1493 		 * actually call stop().
1494 		 */
1495 		t->t_proc_flag &= ~TP_WATCHSTOP;
1496 		p->p_lwprcnt++;
1497 
1498 		stop(PR_SUSPENDED, SUSPEND_NORMAL);
1499 
1500 		/*
1501 		 * It's not OK to do watchpoint activity, notify caller to
1502 		 * retry.
1503 		 */
1504 		ret = -1;
1505 
1506 	} else {
1507 
1508 		/*
1509 		 * The only way we can hit the case where SHOLDWATCH is set and
1510 		 * SWATCHOK is set is if we are triggering this from within a
1511 		 * stop() call.  Assert that this is the case.
1512 		 */
1513 
1514 		ASSERT(t->t_proc_flag & TP_STOPPING);
1515 		p->p_lwprcnt++;
1516 	}
1517 
1518 	mutex_exit(&p->p_lock);
1519 
1520 	return (ret);
1521 }
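
/*
 * Illustrative retry loop (hypothetical): a caller performing watchpoint
 * activity retries until holdwatch() reports that all other threads were
 * quiesced, then undoes the hold with continuelwps().
 *
 *	while (holdwatch() != 0)
 *		continue;	(re-validate the watched pages, then retry)
 *	... perform the watchpoint activity ...
 *	mutex_enter(&p->p_lock);
 *	continuelwps(p);
 *	mutex_exit(&p->p_lock);
 */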
1522 
1523 /*
1524  * force all interruptible lwps to trap into the kernel.
1525  */
1526 void
1527 pokelwps(proc_t *p)
1528 {
1529 	kthread_t *t;
1530 
1531 	ASSERT(MUTEX_HELD(&p->p_lock));
1532 
1533 	t = p->p_tlist;
1534 	do {
1535 		if (t == curthread)
1536 			continue;
1537 		thread_lock(t);
1538 		aston(t);	/* make thread trap or do post_syscall */
1539 		if (ISWAKEABLE(t) || ISWAITING(t)) {
1540 			setrun_locked(t);
1541 		} else if (t->t_state == TS_STOPPED) {
1542 			/*
1543 			 * Ensure that proc_exit() is not blocked by lwps
1544 			 * that were stopped via jobcontrol or /proc.
1545 			 */
1546 			if (p->p_flag & SEXITLWPS) {
1547 				p->p_stopsig = 0;
1548 				t->t_schedflag |= (TS_XSTART | TS_PSTART);
1549 				setrun_locked(t);
1550 			}
1551 			/*
1552 			 * If we are holding lwps for a forkall(),
1553 			 * force lwps that have been suspended via
1554 			 * lwp_suspend() and are suspended inside
1555 			 * of a system call to proceed to their
1556 			 * holdlwp() points where they are clonable.
1557 			 */
1558 			if ((p->p_flag & SHOLDFORK) && SUSPENDED(t)) {
1559 				if ((t->t_schedflag & TS_CSTART) == 0) {
1560 					p->p_lwprcnt++;
1561 					t->t_schedflag |= TS_CSTART;
1562 					setrun_locked(t);
1563 				}
1564 			}
1565 		} else if (t->t_state == TS_ONPROC) {
1566 			if (t->t_cpu != CPU)
1567 				poke_cpu(t->t_cpu->cpu_id);
1568 		}
1569 		thread_unlock(t);
1570 	} while ((t = t->t_forw) != p->p_tlist);
1571 }
1572 
1573 /*
1574  * undo the effects of holdlwps() or holdwatch().
1575  */
1576 void
1577 continuelwps(proc_t *p)
1578 {
1579 	kthread_t *t;
1580 
1581 	/*
1582 	 * If this flag is set, then the original holdwatch() didn't actually
1583 	 * stop the process.  See comments for holdwatch().
1584 	 */
1585 	if (p->p_flag & SWATCHOK) {
1586 		ASSERT(curthread->t_proc_flag & TP_STOPPING);
1587 		return;
1588 	}
1589 
1590 	ASSERT(MUTEX_HELD(&p->p_lock));
1591 	ASSERT((p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) == 0);
1592 
1593 	t = p->p_tlist;
1594 	do {
1595 		thread_lock(t);		/* SUSPENDED looks at t_schedflag */
1596 		if (SUSPENDED(t) && !(t->t_proc_flag & TP_HOLDLWP)) {
1597 			p->p_lwprcnt++;
1598 			t->t_schedflag |= TS_CSTART;
1599 			setrun_locked(t);
1600 		}
1601 		thread_unlock(t);
1602 	} while ((t = t->t_forw) != p->p_tlist);
1603 }
1604 
1605 /*
1606  * Force all LWPs in the current process other than the caller to exit,
1607  * and then cv_wait() on p_holdlwps for them to exit.  The exitlwps() function
1608  * is typically used in these situations:
1609  *
1610  *   (a) prior to an exec() system call
1611  *   (b) prior to dumping a core file
1612  *   (c) prior to a uadmin() shutdown
1613  *
1614  * If the 'coredump' flag is set, other LWPs are quiesced but not destroyed.
1615  * Multiple threads in the process can call this function at one time by
1616  * triggering execs or core dumps simultaneously, so the SEXITLWPS bit is used
1617  * to declare one particular thread the winner who gets to kill the others.
1618  * If a thread wins the exitlwps() dance, zero is returned; otherwise an
1619  * appropriate errno value is returned to caller for its system call to return.
1620  */
1621 int
1622 exitlwps(int coredump)
1623 {
1624 	proc_t *p = curproc;
1625 	int heldcnt;
1626 
1627 	if (curthread->t_door)
1628 		door_slam();
1629 	if (p->p_door_list)
1630 		door_revoke_all();
1631 	if (curthread->t_schedctl != NULL)
1632 		schedctl_lwp_cleanup(curthread);
1633 
1634 	/*
1635 	 * Ensure that before starting to wait for other lwps to exit,
1636 	 * clean up all upimutexes held by curthread.  Otherwise, some other
1637 	 * lwp could be waiting (uninterruptibly) for a upimutex held by
1638 	 * curthread, and the call to pokelwps() below would deadlock.
1639 	 * Even if a blocked upimutex_lock is made interruptible,
1640 	 * curthread's upimutexes need to be unlocked: do it here.
1641 	 */
1642 	if (curthread->t_upimutex != NULL)
1643 		upimutex_cleanup();
1644 
1645 	/*
1646 	 * Grab p_lock in order to check and set SEXITLWPS to declare a winner.
1647 	 * We must also block any further /proc access from this point forward.
1648 	 */
1649 	mutex_enter(&p->p_lock);
1650 	prbarrier(p);
1651 
1652 	if (p->p_flag & SEXITLWPS) {
1653 		mutex_exit(&p->p_lock);
1654 		aston(curthread);	/* force a trip through post_syscall */
1655 		return (set_errno(EINTR));
1656 	}
1657 
1658 	p->p_flag |= SEXITLWPS;
1659 	if (coredump)		/* tell other lwps to stop, not exit */
1660 		p->p_flag |= SCOREDUMP;
1661 
1662 	/*
1663 	 * Give precedence to exitlwps() if a holdlwps() is
1664 	 * in progress. The lwp doing the holdlwps() operation
1665 	 * is aborted when it is awakened.
1666 	 */
1667 	while (p->p_flag & (SHOLDFORK | SHOLDFORK1 | SHOLDWATCH)) {
1668 		cv_broadcast(&p->p_holdlwps);
1669 		cv_wait(&p->p_holdlwps, &p->p_lock);
1670 		prbarrier(p);
1671 	}
1672 	p->p_flag |= SHOLDFORK;
1673 	pokelwps(p);
1674 
1675 	/*
1676 	 * Wait for process to become quiescent.
1677 	 */
1678 	--p->p_lwprcnt;
1679 	while (p->p_lwprcnt > 0) {
1680 		cv_wait(&p->p_holdlwps, &p->p_lock);
1681 		prbarrier(p);
1682 	}
1683 	p->p_lwprcnt++;
1684 	ASSERT(p->p_lwprcnt == 1);
1685 
1686 	/*
1687 	 * The SCOREDUMP flag puts the process into a quiescent
1688 	 * state.  The process's lwps remain attached to this
1689 	 * process until exitlwps() is called again without the
1690 	 * 'coredump' flag set, then the lwps are terminated
1691 	 * and the process can exit.
1692 	 */
1693 	if (coredump) {
1694 		p->p_flag &= ~(SCOREDUMP | SHOLDFORK | SEXITLWPS);
1695 		goto out;
1696 	}
1697 
1698 	/*
1699 	 * Determine if there are any lwps left dangling in
1700 	 * the stopped state.  This happens when exitlwps()
1701 	 * aborts a holdlwps() operation.
1702 	 */
1703 	p->p_flag &= ~SHOLDFORK;
1704 	if ((heldcnt = p->p_lwpcnt) > 1) {
1705 		kthread_t *t;
1706 		for (t = curthread->t_forw; --heldcnt > 0; t = t->t_forw) {
1707 			t->t_proc_flag &= ~TP_TWAIT;
1708 			lwp_continue(t);
1709 		}
1710 	}
1711 
1712 	/*
1713 	 * Wait for all other lwps to exit.
1714 	 */
1715 	--p->p_lwprcnt;
1716 	while (p->p_lwpcnt > 1) {
1717 		cv_wait(&p->p_holdlwps, &p->p_lock);
1718 		prbarrier(p);
1719 	}
1720 	++p->p_lwprcnt;
1721 	ASSERT(p->p_lwpcnt == 1 && p->p_lwprcnt == 1);
1722 
1723 	p->p_flag &= ~SEXITLWPS;
1724 	curthread->t_proc_flag &= ~TP_TWAIT;
1725 
1726 out:
1727 	if (!coredump && p->p_zombcnt) {	/* cleanup the zombie lwps */
1728 		lwpdir_t *ldp;
1729 		lwpent_t *lep;
1730 		int i;
1731 
1732 		for (ldp = p->p_lwpdir, i = 0; i < p->p_lwpdir_sz; i++, ldp++) {
1733 			lep = ldp->ld_entry;
1734 			if (lep != NULL && lep->le_thread != curthread) {
1735 				ASSERT(lep->le_thread == NULL);
1736 				p->p_zombcnt--;
1737 				lwp_hash_out(p, lep->le_lwpid);
1738 			}
1739 		}
1740 		ASSERT(p->p_zombcnt == 0);
1741 	}
1742 
1743 	/*
1744 	 * If some other LWP in the process wanted us to suspend ourself,
1745 	 * then we will not do it.  The other LWP is now terminated and
1746 	 * no one will ever continue us again if we suspend ourself.
1747 	 */
1748 	curthread->t_proc_flag &= ~TP_HOLDLWP;
1749 	p->p_flag &= ~(SHOLDFORK | SHOLDFORK1 | SHOLDWATCH | SLWPWRAP);
1750 	mutex_exit(&p->p_lock);
1751 	return (0);
1752 }
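
/*
 * Illustrative sketch (mirrors the exec() usage described above): a
 * caller kills its sibling lwps before proceeding; a non-zero return
 * means another lwp won the race, and this one should return that errno
 * from its own system call.
 *
 *	if ((error = exitlwps(0)) != 0)
 *		return (error);
 *	... continue with the exec ...
 */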
1753 
1754 /*
1755  * duplicate an lwp.
1756  */
1757 klwp_t *
1758 forklwp(klwp_t *lwp, proc_t *cp, id_t lwpid)
1759 {
1760 	klwp_t *clwp;
1761 	void *tregs, *tfpu;
1762 	kthread_t *t = lwptot(lwp);
1763 	kthread_t *ct;
1764 	proc_t *p = lwptoproc(lwp);
1765 	int cid;
1766 	void *bufp;
1767 	void *brand_data;
1768 	int val;
1769 
1770 	ASSERT(p == curproc);
1771 	ASSERT(t == curthread || (SUSPENDED(t) && lwp->lwp_asleep == 0));
1772 
1773 #if defined(__sparc)
1774 	if (t == curthread)
1775 		(void) flush_user_windows_to_stack(NULL);
1776 #endif
1777 
1778 	if (t == curthread)
1779 		/* copy args out of registers first */
1780 		(void) save_syscall_args();
1781 
1782 	clwp = lwp_create(cp->p_lwpcnt == 0 ? lwp_rtt_initial : lwp_rtt,
1783 	    NULL, 0, cp, TS_STOPPED, t->t_pri, &t->t_hold, NOCLASS, lwpid);
1784 	if (clwp == NULL)
1785 		return (NULL);
1786 
1787 	/*
1788 	 * most of the parent's lwp can be copied to its duplicate,
1789 	 * except for the fields that are unique to each lwp, like
1790 	 * lwp_thread, lwp_procp, lwp_regs, and lwp_ap.
1791 	 */
1792 	ct = clwp->lwp_thread;
1793 	tregs = clwp->lwp_regs;
1794 	tfpu = clwp->lwp_fpu;
1795 	brand_data = clwp->lwp_brand;
1796 
1797 	/*
1798 	 * Copy parent lwp to child lwp.  Hold child's p_lock to prevent
1799 	 * mstate_aggr_state() from reading stale mstate entries copied
1800 	 * from lwp to clwp.
1801 	 */
1802 	mutex_enter(&cp->p_lock);
1803 	*clwp = *lwp;
1804 
1805 	/* clear microstate and resource usage data in new lwp */
1806 	init_mstate(ct, LMS_STOPPED);
1807 	bzero(&clwp->lwp_ru, sizeof (clwp->lwp_ru));
1808 	mutex_exit(&cp->p_lock);
1809 
1810 	/* fix up child's lwp */
1811 
1812 	clwp->lwp_pcb.pcb_flags = 0;
1813 #if defined(__sparc)
1814 	clwp->lwp_pcb.pcb_step = STEP_NONE;
1815 #endif
1816 	clwp->lwp_cursig = 0;
1817 	clwp->lwp_extsig = 0;
1818 	clwp->lwp_curinfo = (struct sigqueue *)0;
1819 	clwp->lwp_thread = ct;
1820 	ct->t_sysnum = t->t_sysnum;
1821 	clwp->lwp_regs = tregs;
1822 	clwp->lwp_fpu = tfpu;
1823 	clwp->lwp_brand = brand_data;
1824 	clwp->lwp_ap = clwp->lwp_arg;
1825 	clwp->lwp_procp = cp;
1826 	bzero(clwp->lwp_timer, sizeof (clwp->lwp_timer));
1827 	clwp->lwp_lastfault = 0;
1828 	clwp->lwp_lastfaddr = 0;
1829 
1830 	/* copy parent's struct regs to child. */
1831 	lwp_forkregs(lwp, clwp);
1832 
1833 	/*
1834 	 * Fork thread context ops, if any.
1835 	 */
1836 	if (t->t_ctx)
1837 		forkctx(t, ct);
1838 
1839 	/* fix door state in the child */
1840 	if (t->t_door)
1841 		door_fork(t, ct);
1842 
1843 	/* copy current contract templates, clear latest contracts */
1844 	lwp_ctmpl_copy(clwp, lwp);
1845 
1846 	mutex_enter(&cp->p_lock);
1847 	/* lwp_create() set the TP_HOLDLWP flag */
1848 	if (!(t->t_proc_flag & TP_HOLDLWP))
1849 		ct->t_proc_flag &= ~TP_HOLDLWP;
1850 	if (cp->p_flag & SMSACCT)
1851 		ct->t_proc_flag |= TP_MSACCT;
1852 	mutex_exit(&cp->p_lock);
1853 
1854 	/* Allow brand to propagate brand-specific state */
1855 	if (PROC_IS_BRANDED(p))
1856 		BROP(p)->b_forklwp(lwp, clwp);
1857 
1858 retry:
1859 	cid = t->t_cid;
1860 
1861 	val = CL_ALLOC(&bufp, cid, KM_SLEEP);
1862 	ASSERT(val == 0);
1863 
1864 	mutex_enter(&p->p_lock);
1865 	if (cid != t->t_cid) {
1866 		/*
1867 		 * Someone just changed this thread's scheduling class,
1868 		 * so try pre-allocating the buffer again.  Hopefully we
1869 		 * don't hit this often.
1870 		 */
1871 		mutex_exit(&p->p_lock);
1872 		CL_FREE(cid, bufp);
1873 		goto retry;
1874 	}
1875 
1876 	ct->t_unpark = t->t_unpark;
1877 	ct->t_clfuncs = t->t_clfuncs;
1878 	CL_FORK(t, ct, bufp);
1879 	ct->t_cid = t->t_cid;	/* after data allocated so prgetpsinfo works */
1880 	mutex_exit(&p->p_lock);
1881 
1882 	return (clwp);
1883 }
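
/*
 * Illustrative sketch (hypothetical names): the fork path duplicates the
 * calling lwp into the child process cp roughly as follows.
 *
 *	klwp_t *clwp = forklwp(ttolwp(curthread), cp, curthread->t_tid);
 *	if (clwp == NULL)
 *		... back out the fork ...
 */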
1884 
1885 /*
1886  * Add a new lwp entry to the lwp directory and to the lwpid hash table.
1887  */
1888 void
1889 lwp_hash_in(proc_t *p, lwpent_t *lep, tidhash_t *tidhash, uint_t tidhash_sz,
1890     int do_lock)
1891 {
1892 	tidhash_t *thp = &tidhash[TIDHASH(lep->le_lwpid, tidhash_sz)];
1893 	lwpdir_t **ldpp;
1894 	lwpdir_t *ldp;
1895 	kthread_t *t;
1896 
1897 	/*
1898 	 * Allocate a directory element from the free list.
1899 	 * Code elsewhere guarantees a free slot.
1900 	 */
1901 	ldp = p->p_lwpfree;
1902 	p->p_lwpfree = ldp->ld_next;
1903 	ASSERT(ldp->ld_entry == NULL);
1904 	ldp->ld_entry = lep;
1905 
1906 	if (do_lock)
1907 		mutex_enter(&thp->th_lock);
1908 
1909 	/*
1910 	 * Insert it into the lwpid hash table.
1911 	 */
1912 	ldpp = &thp->th_list;
1913 	ldp->ld_next = *ldpp;
1914 	*ldpp = ldp;
1915 
1916 	/*
1917 	 * Set the active thread's directory slot entry.
1918 	 */
1919 	if ((t = lep->le_thread) != NULL) {
1920 		ASSERT(lep->le_lwpid == t->t_tid);
1921 		t->t_dslot = (int)(ldp - p->p_lwpdir);
1922 	}
1923 
1924 	if (do_lock)
1925 		mutex_exit(&thp->th_lock);
1926 }
1927 
1928 /*
1929  * Remove an lwp from the lwpid hash table and free its directory entry.
1930  * This is done when a detached lwp exits in lwp_exit() or
1931  * when a non-detached lwp is waited for in lwp_wait() or
1932  * when a zombie lwp is detached in lwp_detach().
1933  */
1934 void
1935 lwp_hash_out(proc_t *p, id_t lwpid)
1936 {
1937 	tidhash_t *thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
1938 	lwpdir_t **ldpp;
1939 	lwpdir_t *ldp;
1940 	lwpent_t *lep;
1941 
1942 	mutex_enter(&thp->th_lock);
1943 	for (ldpp = &thp->th_list;
1944 	    (ldp = *ldpp) != NULL; ldpp = &ldp->ld_next) {
1945 		lep = ldp->ld_entry;
1946 		if (lep->le_lwpid == lwpid) {
1947 			prlwpfree(p, lep);	/* /proc deals with le_trace */
1948 			*ldpp = ldp->ld_next;
1949 			ldp->ld_entry = NULL;
1950 			ldp->ld_next = p->p_lwpfree;
1951 			p->p_lwpfree = ldp;
1952 			kmem_free(lep, sizeof (*lep));
1953 			break;
1954 		}
1955 	}
1956 	mutex_exit(&thp->th_lock);
1957 }
1958 
1959 /*
1960  * Lookup an lwp in the lwpid hash table by lwpid.
1961  */
1962 lwpdir_t *
1963 lwp_hash_lookup(proc_t *p, id_t lwpid)
1964 {
1965 	tidhash_t *thp;
1966 	lwpdir_t *ldp;
1967 
1968 	/*
1969 	 * The process may be exiting, after p_tidhash has been set to NULL in
1970 	 * proc_exit() but before prfree() has been called.  Return failure in
1971 	 * this case.
1972 	 */
1973 	if (p->p_tidhash == NULL)
1974 		return (NULL);
1975 
1976 	thp = &p->p_tidhash[TIDHASH(lwpid, p->p_tidhash_sz)];
1977 	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
1978 		if (ldp->ld_entry->le_lwpid == lwpid)
1979 			return (ldp);
1980 	}
1981 
1982 	return (NULL);
1983 }
1984 
1985 /*
1986  * Same as lwp_hash_lookup(), but acquire and return
1987  * the tid hash table entry lock on success.
1988  */
1989 lwpdir_t *
1990 lwp_hash_lookup_and_lock(proc_t *p, id_t lwpid, kmutex_t **mpp)
1991 {
1992 	tidhash_t *tidhash;
1993 	uint_t tidhash_sz;
1994 	tidhash_t *thp;
1995 	lwpdir_t *ldp;
1996 
1997 top:
1998 	tidhash_sz = p->p_tidhash_sz;
1999 	membar_consumer();
2000 	if ((tidhash = p->p_tidhash) == NULL)
2001 		return (NULL);
2002 
2003 	thp = &tidhash[TIDHASH(lwpid, tidhash_sz)];
2004 	mutex_enter(&thp->th_lock);
2005 
2006 	/*
2007 	 * Since we are not holding p->p_lock, the tid hash table
2008 	 * may have changed.  If so, start over.  If not, then
2009 	 * it cannot change until after we drop &thp->th_lock.
2010 	 */
2011 	if (tidhash != p->p_tidhash || tidhash_sz != p->p_tidhash_sz) {
2012 		mutex_exit(&thp->th_lock);
2013 		goto top;
2014 	}
2015 
2016 	for (ldp = thp->th_list; ldp != NULL; ldp = ldp->ld_next) {
2017 		if (ldp->ld_entry->le_lwpid == lwpid) {
2018 			*mpp = &thp->th_lock;
2019 			return (ldp);
2020 		}
2021 	}
2022 
2023 	mutex_exit(&thp->th_lock);
2024 	return (NULL);
2025 }
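
/*
 * Illustrative sketch (hypothetical): the lwp_unpark() style of lookup,
 * which holds only the hash bucket lock rather than p->p_lock.
 *
 *	kmutex_t *mp;
 *	lwpdir_t *ldp;
 *
 *	if ((ldp = lwp_hash_lookup_and_lock(p, lwpid, &mp)) == NULL)
 *		return (ESRCH);
 *	t = ldp->ld_entry->le_thread;
 *	... operate on t while holding the bucket lock ...
 *	mutex_exit(mp);
 */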
2026 
2027 /*
2028  * Update the indicated LWP usage statistic for the current LWP.
2029  */
2030 void
2031 lwp_stat_update(lwp_stat_id_t lwp_stat_id, long inc)
2032 {
2033 	klwp_t *lwp = ttolwp(curthread);
2034 
2035 	if (lwp == NULL)
2036 		return;
2037 
2038 	switch (lwp_stat_id) {
2039 	case LWP_STAT_INBLK:
2040 		lwp->lwp_ru.inblock += inc;
2041 		break;
2042 	case LWP_STAT_OUBLK:
2043 		lwp->lwp_ru.oublock += inc;
2044 		break;
2045 	case LWP_STAT_MSGRCV:
2046 		lwp->lwp_ru.msgrcv += inc;
2047 		break;
2048 	case LWP_STAT_MSGSND:
2049 		lwp->lwp_ru.msgsnd += inc;
2050 		break;
2051 	default:
2052 		panic("lwp_stat_update: invalid lwp_stat_id 0x%x", lwp_stat_id);
2053 	}
2054 }
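
/*
 * Illustrative call (hypothetical): a block-I/O path crediting one input
 * block to the current lwp's resource usage.
 *
 *	lwp_stat_update(LWP_STAT_INBLK, 1);
 */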
2055