xref: /titanic_50/usr/src/uts/common/disp/thread.c (revision 936b7af69172dce89b577831f79c0e18d15e854b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/sysmacros.h>
31 #include <sys/signal.h>
32 #include <sys/stack.h>
33 #include <sys/pcb.h>
34 #include <sys/user.h>
35 #include <sys/systm.h>
36 #include <sys/sysinfo.h>
37 #include <sys/errno.h>
38 #include <sys/cmn_err.h>
39 #include <sys/cred.h>
40 #include <sys/resource.h>
41 #include <sys/task.h>
42 #include <sys/project.h>
43 #include <sys/proc.h>
44 #include <sys/debug.h>
45 #include <sys/disp.h>
46 #include <sys/class.h>
47 #include <vm/seg_kmem.h>
48 #include <vm/seg_kp.h>
49 #include <sys/machlock.h>
50 #include <sys/kmem.h>
51 #include <sys/varargs.h>
52 #include <sys/turnstile.h>
53 #include <sys/poll.h>
54 #include <sys/vtrace.h>
55 #include <sys/callb.h>
56 #include <c2/audit.h>
57 #include <sys/tnf.h>
58 #include <sys/sobject.h>
59 #include <sys/cpupart.h>
60 #include <sys/pset.h>
61 #include <sys/door.h>
62 #include <sys/spl.h>
63 #include <sys/copyops.h>
64 #include <sys/rctl.h>
65 #include <sys/brand.h>
66 #include <sys/pool.h>
67 #include <sys/zone.h>
68 #include <sys/tsol/label.h>
69 #include <sys/tsol/tndb.h>
70 #include <sys/cpc_impl.h>
71 #include <sys/sdt.h>
72 #include <sys/reboot.h>
73 #include <sys/kdi.h>
74 #include <sys/waitq.h>
75 #include <sys/cpucaps.h>
76 #include <sys/kiconv.h>
77 
78 struct kmem_cache *thread_cache;	/* cache of free threads */
79 struct kmem_cache *lwp_cache;		/* cache of free lwps */
80 struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */
81 
82 /*
83  * allthreads is only for use by kmem_readers.  All kernel loops can use
84  * the current thread as a start/end point.
85  */
86 static kthread_t *allthreads = &t0;	/* circular list of all threads */
87 
88 static kcondvar_t reaper_cv;		/* synchronization var */
89 kthread_t	*thread_deathrow;	/* list of reapable threads */
90 kthread_t	*lwp_deathrow;		/* list of reapable threads with lwps */
91 kmutex_t	reaplock;		/* protects lwp and thread deathrows */
92 kmutex_t	thread_free_lock;	/* protects clock from reaper */
93 int	thread_reapcnt = 0;		/* number of threads on deathrow */
94 int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
95 int	reaplimit = 16;			/* delay reaping until reaplimit */
96 
97 extern int nthread;
98 
99 id_t	syscid;				/* system scheduling class ID */
100 void	*segkp_thread;			/* cookie for segkp pool */
101 
102 int lwp_cache_sz = 32;
103 int t_cache_sz = 8;
104 static kt_did_t next_t_id = 1;
105 
106 /*
107  * Min/Max stack sizes for stack size parameters
108  */
109 #define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
110 #define	MIN_STKSIZE	DEFAULTSTKSZ
111 
112 /*
113  * default_stksize overrides lwp_default_stksize if it is set.
114  */
115 int	default_stksize;
116 int	lwp_default_stksize;
117 
118 static zone_key_t zone_thread_key;
119 
120 /*
121  * forward declarations for internal thread specific data (tsd)
122  */
123 static void *tsd_realloc(void *, size_t, size_t);
124 
125 void thread_reaper(void);
126 
127 /*ARGSUSED*/
128 static int
129 turnstile_constructor(void *buf, void *cdrarg, int kmflags)
130 {
131 	bzero(buf, sizeof (turnstile_t));
132 	return (0);
133 }
134 
135 /*ARGSUSED*/
136 static void
137 turnstile_destructor(void *buf, void *cdrarg)
138 {
139 	turnstile_t *ts = buf;
140 
141 	ASSERT(ts->ts_free == NULL);
142 	ASSERT(ts->ts_waiters == 0);
143 	ASSERT(ts->ts_inheritor == NULL);
144 	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
145 	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
146 }
147 
148 void
149 thread_init(void)
150 {
151 	kthread_t *tp;
152 	extern char sys_name[];
153 	extern void idle();
154 	struct cpu *cpu = CPU;
155 
156 	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
157 
158 #if defined(__i386) || defined(__amd64)
159 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
160 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
161 
162 	/*
163 	 * "struct _klwp" includes a "struct pcb", which includes a
164 	 * "struct fpu", which needs to be 16-byte aligned on amd64
165 	 * (and even on i386 for fxsave/fxrstor).
166 	 */
167 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
168 	    16, NULL, NULL, NULL, NULL, NULL, 0);
169 #else
170 	/*
171 	 * Allocate thread structures from static_arena.  This prevents
172 	 * issues where a thread tries to relocate its own thread
173 	 * structure and touches it after the mapping has been suspended.
174 	 */
175 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
176 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);
177 
178 	lwp_stk_cache_init();
179 
180 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
181 	    0, NULL, NULL, NULL, NULL, NULL, 0);
182 #endif
183 
184 	turnstile_cache = kmem_cache_create("turnstile_cache",
185 	    sizeof (turnstile_t), 0,
186 	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
187 
188 	label_init();
189 	cred_init();
190 
191 	/*
192 	 * Initialize various resource management facilities.
193 	 */
194 	rctl_init();
195 	cpucaps_init();
196 	/*
197 	 * zone_init() should be called before project_init() so that the
198 	 * project ID for the first project is initialized correctly.
199 	 */
200 	zone_init();
201 	project_init();
202 	brand_init();
203 	kiconv_init();
204 	task_init();
205 	tcache_init();
206 	pool_init();
207 
208 	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
209 
210 	/*
211 	 * Originally, we had two parameters to set default stack
212 	 * size: one for lwp's (lwp_default_stksize), and one for
213 	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
214 	 * Now we have a third parameter that overrides both if it is
215 	 * set to a legal stack size, called default_stksize.
216 	 */
217 
218 	if (default_stksize == 0) {
219 		default_stksize = DEFAULTSTKSZ;
220 	} else if (default_stksize % PAGESIZE != 0 ||
221 	    default_stksize > MAX_STKSIZE ||
222 	    default_stksize < MIN_STKSIZE) {
223 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
224 		    (int)DEFAULTSTKSZ);
225 		default_stksize = DEFAULTSTKSZ;
226 	} else {
227 		lwp_default_stksize = default_stksize;
228 	}
229 
230 	if (lwp_default_stksize == 0) {
231 		lwp_default_stksize = default_stksize;
232 	} else if (lwp_default_stksize % PAGESIZE != 0 ||
233 	    lwp_default_stksize > MAX_STKSIZE ||
234 	    lwp_default_stksize < MIN_STKSIZE) {
235 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
236 		    default_stksize);
237 		lwp_default_stksize = default_stksize;
238 	}
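
	/*
	 * Editor's note (illustrative, not part of the original source):
	 * both tunables are normally set from /etc/system, e.g.
	 *
	 *	set default_stksize = 0x6000
	 *	set lwp_default_stksize = 0x6000
	 *
	 * A value that is not a multiple of PAGESIZE or that falls outside
	 * [MIN_STKSIZE, MAX_STKSIZE] is rejected by the checks above.
	 */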
239 
240 	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
241 	    lwp_default_stksize,
242 	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));
243 
244 	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
245 	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);
246 
247 	(void) getcid(sys_name, &syscid);
248 	curthread->t_cid = syscid;	/* current thread is t0 */
249 
250 	/*
251 	 * Set up the first CPU's idle thread.
252 	 * It runs whenever the CPU has nothing worthwhile to do.
253 	 */
254 	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
255 	cpu->cpu_idle_thread = tp;
256 	tp->t_preempt = 1;
257 	tp->t_disp_queue = cpu->cpu_disp;
258 	ASSERT(tp->t_disp_queue != NULL);
259 	tp->t_bound_cpu = cpu;
260 	tp->t_affinitycnt = 1;
261 
262 	/*
263 	 * Registering a thread in the callback table is usually
264 	 * done in the initialization code of the thread. In this
265 	 * case, we do it right after thread creation to avoid
266 	 * blocking the idle thread while it registers itself. It also
267 	 * avoids the possibility of reregistration in case a CPU
268 	 * restarts its idle thread.
269 	 */
270 	CALLB_CPR_INIT_SAFE(tp, "idle");
271 
272 	/*
273 	 * Create the thread_reaper daemon. From this point on, exited
274 	 * threads will get reaped.
275 	 */
276 	(void) thread_create(NULL, 0, (void (*)())thread_reaper,
277 	    NULL, 0, &p0, TS_RUN, minclsyspri);
278 
279 	/*
280 	 * Finish initializing the kernel memory allocator now that
281 	 * thread_create() is available.
282 	 */
283 	kmem_thread_init();
284 
285 	if (boothowto & RB_DEBUG)
286 		kdi_dvec_thravail();
287 }
288 
289 /*
290  * Create a thread.
291  *
292  * thread_create() blocks for memory if necessary.  It never fails.
293  *
294  * If stk is NULL, the thread is created at the base of the stack
295  * and cannot be swapped.
296  */
297 kthread_t *
298 thread_create(
299 	caddr_t	stk,
300 	size_t	stksize,
301 	void	(*proc)(),
302 	void	*arg,
303 	size_t	len,
304 	proc_t	 *pp,
305 	int	state,
306 	pri_t	pri)
307 {
308 	kthread_t *t;
309 	extern struct classfuncs sys_classfuncs;
310 	turnstile_t *ts;
311 
312 	/*
313 	 * Every thread keeps a turnstile around in case it needs to block.
314 	 * The only reason the turnstile is not simply part of the thread
315 	 * structure is that we may have to break the association whenever
316 	 * more than one thread blocks on a given synchronization object.
317 	 * From a memory-management standpoint, turnstiles are like the
318 	 * "attached mblks" that hang off dblks in the streams allocator.
319 	 */
320 	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
321 
322 	if (stk == NULL) {
323 		/*
324 		 * alloc both thread and stack in segkp chunk
325 		 */
326 
327 		if (stksize < default_stksize)
328 			stksize = default_stksize;
329 
330 		if (stksize == default_stksize) {
331 			stk = (caddr_t)segkp_cache_get(segkp_thread);
332 		} else {
333 			stksize = roundup(stksize, PAGESIZE);
334 			stk = (caddr_t)segkp_get(segkp, stksize,
335 			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
336 		}
337 
338 		ASSERT(stk != NULL);
339 
340 		/*
341 		 * The machine-dependent mutex code may require that
342 		 * thread pointers (since they may be used for mutex owner
343 		 * fields) have certain alignment requirements.
344 		 * PTR24_ALIGN is the size of the alignment quanta.
345 		 * XXX - assumes stack grows toward low addresses.
346 		 */
347 		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
348 			cmn_err(CE_PANIC, "thread_create: proposed stack size"
349 			    " too small to hold thread.");
350 #ifdef STACK_GROWTH_DOWN
351 		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
352 		stksize &= -PTR24_ALIGN;	/* make thread aligned */
353 		t = (kthread_t *)(stk + stksize);
354 		bzero(t, sizeof (kthread_t));
355 		if (audit_active)
356 			audit_thread_create(t);
357 		t->t_stk = stk + stksize;
358 		t->t_stkbase = stk;
359 #else	/* stack grows to larger addresses */
360 		stksize -= SA(sizeof (kthread_t));
361 		t = (kthread_t *)(stk);
362 		bzero(t, sizeof (kthread_t));
363 		t->t_stk = stk + sizeof (kthread_t);
364 		t->t_stkbase = stk + stksize + sizeof (kthread_t);
365 #endif	/* STACK_GROWTH_DOWN */
366 		t->t_flag |= T_TALLOCSTK;
367 		t->t_swap = stk;
368 	} else {
369 		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
370 		bzero(t, sizeof (kthread_t));
371 		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
372 		if (audit_active)
373 			audit_thread_create(t);
374 		/*
375 		 * Initialize t_stk to the kernel stack pointer to use
376 		 * upon entry to the kernel
377 		 */
378 #ifdef STACK_GROWTH_DOWN
379 		t->t_stk = stk + stksize;
380 		t->t_stkbase = stk;
381 #else
382 		t->t_stk = stk;			/* 3b2-like */
383 		t->t_stkbase = stk + stksize;
384 #endif /* STACK_GROWTH_DOWN */
385 	}
386 
387 	/* set default stack flag */
388 	if (stksize == lwp_default_stksize)
389 		t->t_flag |= T_DFLTSTK;
390 
391 	t->t_ts = ts;
392 
393 	/*
394 	 * p_cred could be NULL if thread_create() is called before cred_init()
395 	 * is called in main().
396 	 */
397 	mutex_enter(&pp->p_crlock);
398 	if (pp->p_cred)
399 		crhold(t->t_cred = pp->p_cred);
400 	mutex_exit(&pp->p_crlock);
401 	t->t_start = gethrestime_sec();
402 	t->t_startpc = proc;
403 	t->t_procp = pp;
404 	t->t_clfuncs = &sys_classfuncs.thread;
405 	t->t_cid = syscid;
406 	t->t_pri = pri;
407 	t->t_stime = lbolt;
408 	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
409 	t->t_bind_cpu = PBIND_NONE;
410 	t->t_bind_pset = PS_NONE;
411 	t->t_plockp = &pp->p_lock;
412 	t->t_copyops = NULL;
413 	t->t_taskq = NULL;
414 	t->t_anttime = 0;
415 	t->t_hatdepth = 0;
416 
417 	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */
418 
419 	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
420 #ifndef NPROBE
421 	/* Kernel probe */
422 	tnf_thread_create(t);
423 #endif /* NPROBE */
424 	LOCK_INIT_CLEAR(&t->t_lock);
425 
426 	/*
427 	 * Callers who pass a NULL entry point (proc) must do their own
428 	 * stack initialization, e.g. lwp_create().
429 	 */
430 	if (proc != NULL) {
431 		t->t_stk = thread_stk_init(t->t_stk);
432 		thread_load(t, proc, arg, len);
433 	}
434 
435 	/*
436 	 * Put a hold on project0. If this thread is actually in a
437 	 * different project, then t_proj will be changed later in
438 	 * lwp_create().  All kernel-only threads must be in project 0.
439 	 */
440 	t->t_proj = project_hold(proj0p);
441 
442 	lgrp_affinity_init(&t->t_lgrp_affinity);
443 
444 	mutex_enter(&pidlock);
445 	nthread++;
446 	t->t_did = next_t_id++;
447 	t->t_prev = curthread->t_prev;
448 	t->t_next = curthread;
449 
450 	/*
451 	 * Add the thread to the list of all threads, and initialize
452 	 * its t_cpu pointer.  We need to block preemption since
453 	 * cpu_offline walks the thread list looking for threads
454 	 * with t_cpu pointing to the CPU being offlined.  We want
455 	 * to make sure that the list is consistent and that if t_cpu
456 	 * is set, the thread is on the list.
457 	 */
458 	kpreempt_disable();
459 	curthread->t_prev->t_next = t;
460 	curthread->t_prev = t;
461 
462 	/*
463 	 * Threads should never have a NULL t_cpu pointer so assign it
464 	 * here.  If the thread is being created with state TS_RUN a
465 	 * better CPU may be chosen when it is placed on the run queue.
466 	 *
467 	 * We need to keep kernel preemption disabled when setting all
468 	 * three fields to keep them in sync.  Also, always create in
469 	 * the default partition since that's where kernel threads go
470 	 * (if this isn't a kernel thread, t_cpupart will be changed
471 	 * in lwp_create before setting the thread runnable).
472 	 */
473 	t->t_cpupart = &cp_default;
474 
475 	/*
476 	 * For now, affiliate this thread with the root lgroup.
477 	 * Since the kernel does not (presently) allocate its memory
478 	 * in a locality aware fashion, the root is an appropriate home.
479 	 * If this thread is later associated with an lwp, it will have
480 	 * it's lgroup re-assigned at that time.
481 	 * its lgroup re-assigned at that time.
482 	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
483 
484 	/*
485 	 * Inherit the current cpu.  If this cpu isn't part of the chosen
486 	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
487 	 * is ready to run.
488 	 */
489 	if (CPU->cpu_part == &cp_default)
490 		t->t_cpu = CPU;
491 	else
492 		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
493 		    t->t_pri, NULL);
494 
495 	t->t_disp_queue = t->t_cpu->cpu_disp;
496 	kpreempt_enable();
497 
498 	/*
499 	 * Initialize thread state and the dispatcher lock pointer.
500 	 * Need to hold onto pidlock to block allthreads walkers until
501 	 * the state is set.
502 	 */
503 	switch (state) {
504 	case TS_RUN:
505 		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
506 		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
507 		CL_SETRUN(t);
508 		thread_unlock(t);
509 		break;
510 
511 	case TS_ONPROC:
512 		THREAD_ONPROC(t, t->t_cpu);
513 		break;
514 
515 	case TS_FREE:
516 		/*
517 		 * Free state will be used for intr threads.
518 		 * The interrupt routine must set the thread dispatcher
519 		 * lock pointer (t_lockp) if starting on a CPU
520 		 * other than the current one.
521 		 */
522 		THREAD_FREEINTR(t, CPU);
523 		break;
524 
525 	case TS_STOPPED:
526 		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
527 		break;
528 
529 	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
530 		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
531 	}
532 	mutex_exit(&pidlock);
533 	return (t);
534 }
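
/*
 * Editor's illustrative sketch (not part of the original source): a minimal
 * kernel-only daemon created with thread_create(), modelled on the
 * thread_reaper() creation in thread_init() above.  my_daemon() and
 * my_daemon_start() are hypothetical names.
 */
#if 0
static void
my_daemon(void)
{
	for (;;) {
		/* wait for work (e.g. cv_wait() on a private cv), do it */
	}
}

static void
my_daemon_start(void)
{
	/*
	 * A NULL stack and zero stksize make thread_create() carve the
	 * stack and the kthread_t out of a single segkp chunk; TS_RUN
	 * makes the thread runnable immediately at minclsyspri.
	 */
	(void) thread_create(NULL, 0, (void (*)())my_daemon, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}
#endif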
535 
536 /*
537  * Move thread to project0 and take care of project reference counters.
538  */
539 void
540 thread_rele(kthread_t *t)
541 {
542 	kproject_t *kpj;
543 
544 	thread_lock(t);
545 
546 	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
547 	kpj = ttoproj(t);
548 	t->t_proj = proj0p;
549 
550 	thread_unlock(t);
551 
552 	if (kpj != proj0p) {
553 		project_rele(kpj);
554 		(void) project_hold(proj0p);
555 	}
556 }
557 
558 void
559 thread_exit(void)
560 {
561 	kthread_t *t = curthread;
562 
563 	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
564 		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
565 
566 	tsd_exit();		/* Clean up this thread's TSD */
567 
568 	kcpc_passivate();	/* clean up performance counter state */
569 
570 	/*
571 	 * No kernel thread should have called poll() without arranging
572 	 * calling pollcleanup() here.
573 	 * for pollcleanup() to be called here.
574 	ASSERT(t->t_pollstate == NULL);
575 	ASSERT(t->t_schedctl == NULL);
576 	if (t->t_door)
577 		door_slam();	/* in case thread did an upcall */
578 
579 #ifndef NPROBE
580 	/* Kernel probe */
581 	if (t->t_tnf_tpdp)
582 		tnf_thread_exit();
583 #endif /* NPROBE */
584 
585 	thread_rele(t);
586 	t->t_preempt++;
587 
588 	/*
589 	 * remove thread from the all threads list so that
590 	 * death-row can use the same pointers.
591 	 */
592 	mutex_enter(&pidlock);
593 	t->t_next->t_prev = t->t_prev;
594 	t->t_prev->t_next = t->t_next;
595 	ASSERT(allthreads != t);	/* t0 never exits */
596 	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
597 	mutex_exit(&pidlock);
598 
599 	if (t->t_ctx != NULL)
600 		exitctx(t);
601 	if (t->t_procp->p_pctx != NULL)
602 		exitpctx(t->t_procp);
603 
604 	t->t_state = TS_ZOMB;	/* set zombie thread */
605 
606 	swtch_from_zombie();	/* give up the CPU */
607 	/* NOTREACHED */
608 }
609 
610 /*
611  * Check to see if the specified thread is active (defined as being on
612  * the thread list).  This is certainly a slow way to do this; if there's
613  * ever a reason to speed it up, we could maintain a hash table of active
614  * threads indexed by their t_did.
615  */
616 static kthread_t *
617 did_to_thread(kt_did_t tid)
618 {
619 	kthread_t *t;
620 
621 	ASSERT(MUTEX_HELD(&pidlock));
622 	for (t = curthread->t_next; t != curthread; t = t->t_next) {
623 		if (t->t_did == tid)
624 			break;
625 	}
626 	if (t->t_did == tid)
627 		return (t);
628 	else
629 		return (NULL);
630 }
631 
632 /*
633  * Wait for specified thread to exit.  Returns immediately if the thread
634  * could not be found, meaning that it has either already exited or never
635  * existed.
636  */
637 void
638 thread_join(kt_did_t tid)
639 {
640 	kthread_t *t;
641 
642 	ASSERT(tid != curthread->t_did);
643 	ASSERT(tid != t0.t_did);
644 
645 	mutex_enter(&pidlock);
646 	/*
647 	 * Make sure we check that the thread is on the thread list
648 	 * before blocking on it; otherwise we could end up blocking on
649 	 * a cv that's already been freed.  In other words, don't cache
650 	 * the thread pointer across calls to cv_wait.
651 	 *
652 	 * The choice of loop invariant means that whenever a thread
653 	 * is taken off the allthreads list, a cv_broadcast must be
654 	 * performed on that thread's t_joincv to wake up any waiters.
655 	 * The broadcast doesn't have to happen right away, but it
656 	 * shouldn't be postponed indefinitely (e.g., by doing it in
657 	 * thread_free(), which may only be executed when the deathrow
658 	 * queue is processed).
659 	 */
660 	while (t = did_to_thread(tid))
661 		cv_wait(&t->t_joincv, &pidlock);
662 	mutex_exit(&pidlock);
663 }
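
/*
 * Editor's illustrative sketch (not part of the original source): record the
 * new thread's t_did immediately after creation and join on the ID, rather
 * than caching the kthread_t pointer (see the comment in thread_join() above).
 * worker(), example_worker_did and the start/stop functions are hypothetical.
 */
#if 0
static kt_did_t example_worker_did;

static void
example_worker_start(void)
{
	kthread_t *t;

	t = thread_create(NULL, 0, (void (*)())worker, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
	example_worker_did = t->t_did;
}

static void
example_worker_stop(void)
{
	/* ... ask worker() to call thread_exit(), then wait for it ... */
	thread_join(example_worker_did);
}
#endif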
664 
665 void
666 thread_free(kthread_t *t)
667 {
668 	ASSERT(t != &t0 && t->t_state == TS_FREE);
669 	ASSERT(t->t_door == NULL);
670 	ASSERT(t->t_schedctl == NULL);
671 	ASSERT(t->t_pollstate == NULL);
672 
673 	t->t_pri = 0;
674 	t->t_pc = 0;
675 	t->t_sp = 0;
676 	t->t_wchan0 = NULL;
677 	t->t_wchan = NULL;
678 	if (t->t_cred != NULL) {
679 		crfree(t->t_cred);
680 		t->t_cred = 0;
681 	}
682 	if (t->t_pdmsg) {
683 		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
684 		t->t_pdmsg = NULL;
685 	}
686 	if (audit_active)
687 		audit_thread_free(t);
688 #ifndef NPROBE
689 	if (t->t_tnf_tpdp)
690 		tnf_thread_free(t);
691 #endif /* NPROBE */
692 	if (t->t_cldata) {
693 		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
694 	}
695 	if (t->t_rprof != NULL) {
696 		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
697 		t->t_rprof = NULL;
698 	}
699 	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
700 	if (t->t_lwp)
701 		lwp_freeregs(t->t_lwp, 0);
702 	if (t->t_ctx)
703 		freectx(t, 0);
704 	t->t_stk = NULL;
705 	if (t->t_lwp)
706 		lwp_stk_fini(t->t_lwp);
707 	lock_clear(&t->t_lock);
708 
709 	if (t->t_ts->ts_waiters > 0)
710 		panic("thread_free: turnstile still active");
711 
712 	kmem_cache_free(turnstile_cache, t->t_ts);
713 
714 	free_afd(&t->t_activefd);
715 
716 	/*
717 	 * Barrier for clock thread.  The clock holds this lock to
718 	 * keep the thread from going away while it's looking at it.
719 	 */
720 	mutex_enter(&thread_free_lock);
721 	mutex_exit(&thread_free_lock);
722 
723 	ASSERT(ttoproj(t) == proj0p);
724 	project_rele(ttoproj(t));
725 
726 	lgrp_affinity_free(&t->t_lgrp_affinity);
727 
728 	/*
729 	 * Free thread struct and its stack.
730 	 */
731 	if (t->t_flag & T_TALLOCSTK) {
732 		/* thread struct is embedded in stack */
733 		segkp_release(segkp, t->t_swap);
734 		mutex_enter(&pidlock);
735 		nthread--;
736 		mutex_exit(&pidlock);
737 	} else {
738 		if (t->t_swap) {
739 			segkp_release(segkp, t->t_swap);
740 			t->t_swap = NULL;
741 		}
742 		if (t->t_lwp) {
743 			kmem_cache_free(lwp_cache, t->t_lwp);
744 			t->t_lwp = NULL;
745 		}
746 		mutex_enter(&pidlock);
747 		nthread--;
748 		mutex_exit(&pidlock);
749 		kmem_cache_free(thread_cache, t);
750 	}
751 }
752 
753 /*
754  * Removes threads associated with the given zone from a deathrow queue.
755  * tp is a pointer to the head of the deathrow queue, and countp is a
756  * pointer to the current deathrow count.  Returns a linked list of
757  * threads removed from the list.
758  */
759 static kthread_t *
760 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
761 {
762 	kthread_t *tmp, *list = NULL;
763 	cred_t *cr;
764 
765 	ASSERT(MUTEX_HELD(&reaplock));
766 	while (*tp != NULL) {
767 		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
768 			tmp = *tp;
769 			*tp = tmp->t_forw;
770 			tmp->t_forw = list;
771 			list = tmp;
772 			(*countp)--;
773 		} else {
774 			tp = &(*tp)->t_forw;
775 		}
776 	}
777 	return (list);
778 }
779 
780 static void
781 thread_reap_list(kthread_t *t)
782 {
783 	kthread_t *next;
784 
785 	while (t != NULL) {
786 		next = t->t_forw;
787 		thread_free(t);
788 		t = next;
789 	}
790 }
791 
792 /* ARGSUSED */
793 static void
794 thread_zone_destroy(zoneid_t zoneid, void *unused)
795 {
796 	kthread_t *t, *l;
797 
798 	mutex_enter(&reaplock);
799 	/*
800 	 * Pull threads and lwps associated with zone off deathrow lists.
801 	 */
802 	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
803 	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
804 	mutex_exit(&reaplock);
805 
806 	/*
807 	 * Reap threads
808 	 */
809 	thread_reap_list(t);
810 
811 	/*
812 	 * Reap lwps
813 	 */
814 	thread_reap_list(l);
815 }
816 
817 /*
818  * Clean up zombie threads that are on deathrow.
819  */
820 void
821 thread_reaper()
822 {
823 	kthread_t *t, *l;
824 	callb_cpr_t cprinfo;
825 
826 	/*
827 	 * Register callback to clean up threads when zone is destroyed.
828 	 */
829 	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
830 
831 	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
832 	for (;;) {
833 		mutex_enter(&reaplock);
834 		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
835 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
836 			cv_wait(&reaper_cv, &reaplock);
837 			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
838 		}
839 		t = thread_deathrow;
840 		l = lwp_deathrow;
841 		thread_deathrow = NULL;
842 		lwp_deathrow = NULL;
843 		thread_reapcnt = 0;
844 		lwp_reapcnt = 0;
845 		mutex_exit(&reaplock);
846 
847 		/*
848 		 * Reap threads
849 		 */
850 		thread_reap_list(t);
851 
852 		/*
853 		 * Reap lwps
854 		 */
855 		thread_reap_list(l);
856 	}
857 }
858 
859 /*
860  * This is called by resume() to put a zombie thread onto deathrow.
861  * The thread's state is changed to TS_FREE to indicate that it is reapable.
862  * This is called from the idle thread so it must not block (just spin).
863  */
864 void
865 reapq_add(kthread_t *t)
866 {
867 	mutex_enter(&reaplock);
868 
869 	/*
870 	 * lwp_deathrow contains only threads with lwp linkage
871 	 * that are of the default stacksize. Anything else goes
872 	 * on thread_deathrow.
873 	 */
874 	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
875 		t->t_forw = lwp_deathrow;
876 		lwp_deathrow = t;
877 		lwp_reapcnt++;
878 	} else {
879 		t->t_forw = thread_deathrow;
880 		thread_deathrow = t;
881 		thread_reapcnt++;
882 	}
883 	if (lwp_reapcnt + thread_reapcnt > reaplimit)
884 		cv_signal(&reaper_cv);	/* wake the reaper */
885 	t->t_state = TS_FREE;
886 	lock_clear(&t->t_lock);
887 
888 	/*
889 	 * Before we return, we need to grab and drop the thread lock for
890 	 * the dead thread.  At this point, the current thread is the idle
891 	 * thread, and the dead thread's CPU lock points to the current
892 	 * CPU -- and we must grab and drop the lock to synchronize with
893 	 * a racing thread walking a blocking chain that the zombie thread
894 	 * was recently in.  By this point, that blocking chain is (by
895 	 * definition) stale:  the dead thread is not holding any locks, and
896 	 * is therefore not in any blocking chains -- but if we do not regrab
897 	 * our lock before freeing the dead thread's data structures, the
898 	 * thread walking the (stale) blocking chain will die on memory
899 	 * corruption when it attempts to drop the dead thread's lock.  We
900 	 * only need do this once because there is no way for the dead thread
901 	 * to ever again be on a blocking chain:  once we have grabbed and
902 	 * dropped the thread lock, we are guaranteed that anyone that could
903 	 * have seen this thread in a blocking chain can no longer see it.
904 	 */
905 	thread_lock(t);
906 	thread_unlock(t);
907 
908 	mutex_exit(&reaplock);
909 }
910 
911 /*
912  * Install thread context ops for the given thread.
913  */
914 void
915 installctx(
916 	kthread_t *t,
917 	void	*arg,
918 	void	(*save)(void *),
919 	void	(*restore)(void *),
920 	void	(*fork)(void *, void *),
921 	void	(*lwp_create)(void *, void *),
922 	void	(*exit)(void *),
923 	void	(*free)(void *, int))
924 {
925 	struct ctxop *ctx;
926 
927 	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
928 	ctx->save_op = save;
929 	ctx->restore_op = restore;
930 	ctx->fork_op = fork;
931 	ctx->lwp_create_op = lwp_create;
932 	ctx->exit_op = exit;
933 	ctx->free_op = free;
934 	ctx->arg = arg;
935 	ctx->next = t->t_ctx;
936 	t->t_ctx = ctx;
937 }
938 
939 /*
940  * Remove the thread context ops from a thread.
941  */
942 int
943 removectx(
944 	kthread_t *t,
945 	void	*arg,
946 	void	(*save)(void *),
947 	void	(*restore)(void *),
948 	void	(*fork)(void *, void *),
949 	void	(*lwp_create)(void *, void *),
950 	void	(*exit)(void *),
951 	void	(*free)(void *, int))
952 {
953 	struct ctxop *ctx, *prev_ctx;
954 
955 	/*
956 	 * The incoming kthread_t (which is the thread for which the
957 	 * context ops will be removed) should be one of the following:
958 	 *
959 	 * a) the current thread,
960 	 *
961 	 * b) a thread of a process that's being forked (SIDL),
962 	 *
963 	 * c) a thread that belongs to the same process as the current
964 	 *    thread and for which the current thread is the agent thread,
965 	 *
966 	 * d) a thread that is TS_STOPPED which is indicative of it
967 	 *    being (if curthread is not an agent) a thread being created
968 	 *    as part of an lwp creation.
969 	 */
970 	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
971 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
972 
973 	/*
974 	 * Serialize modifications to t->t_ctx to prevent the agent thread
975 	 * and the target thread from racing with each other during lwp exit.
976 	 */
977 	mutex_enter(&t->t_ctx_lock);
978 	prev_ctx = NULL;
979 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
980 		if (ctx->save_op == save && ctx->restore_op == restore &&
981 		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
982 		    ctx->exit_op == exit && ctx->free_op == free &&
983 		    ctx->arg == arg) {
984 			if (prev_ctx)
985 				prev_ctx->next = ctx->next;
986 			else
987 				t->t_ctx = ctx->next;
988 			mutex_exit(&t->t_ctx_lock);
989 			if (ctx->free_op != NULL)
990 				(ctx->free_op)(ctx->arg, 0);
991 			kmem_free(ctx, sizeof (struct ctxop));
992 			return (1);
993 		}
994 		prev_ctx = ctx;
995 	}
996 	mutex_exit(&t->t_ctx_lock);
997 
998 	return (0);
999 }
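
/*
 * Editor's illustrative sketch (not part of the original source): a subsystem
 * with per-thread hardware state can hang context ops off the current thread
 * so that savectx()/restorectx() run around every switch.  The xx_* names and
 * xx_state_t are hypothetical.
 */
#if 0
typedef struct xx_state {
	uint64_t xx_regs[8];	/* whatever must survive a context switch */
} xx_state_t;

static void
xx_save(void *arg)
{
	/* called via savectx() when the owning thread goes off-CPU */
}

static void
xx_restore(void *arg)
{
	/* called via restorectx() when the owning thread comes back on */
}

static void
xx_free(void *arg, int isexec)
{
	kmem_free(arg, sizeof (xx_state_t));
}

static void
xx_bind(void)
{
	xx_state_t *xs = kmem_zalloc(sizeof (*xs), KM_SLEEP);

	installctx(curthread, xs, xx_save, xx_restore, NULL, NULL,
	    NULL, xx_free);
}

static void
xx_unbind(xx_state_t *xs)
{
	/*
	 * removectx() matches on all of the ops plus the arg; on success it
	 * invokes the free op itself, which releases xs here.
	 */
	(void) removectx(curthread, xs, xx_save, xx_restore, NULL, NULL,
	    NULL, xx_free);
}
#endif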
1000 
1001 void
1002 savectx(kthread_t *t)
1003 {
1004 	struct ctxop *ctx;
1005 
1006 	ASSERT(t == curthread);
1007 	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
1008 		if (ctx->save_op != NULL)
1009 			(ctx->save_op)(ctx->arg);
1010 }
1011 
1012 void
1013 restorectx(kthread_t *t)
1014 {
1015 	struct ctxop *ctx;
1016 
1017 	ASSERT(t == curthread);
1018 	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
1019 		if (ctx->restore_op != NULL)
1020 			(ctx->restore_op)(ctx->arg);
1021 }
1022 
1023 void
1024 forkctx(kthread_t *t, kthread_t *ct)
1025 {
1026 	struct ctxop *ctx;
1027 
1028 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1029 		if (ctx->fork_op != NULL)
1030 			(ctx->fork_op)(t, ct);
1031 }
1032 
1033 /*
1034  * Note that this operator is only invoked via the _lwp_create
1035  * system call.  The system may have other reasons to create lwps
1036  * e.g. the agent lwp or the doors unreferenced lwp.
1037  */
1038 void
1039 lwp_createctx(kthread_t *t, kthread_t *ct)
1040 {
1041 	struct ctxop *ctx;
1042 
1043 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1044 		if (ctx->lwp_create_op != NULL)
1045 			(ctx->lwp_create_op)(t, ct);
1046 }
1047 
1048 /*
1049  * exitctx is called from thread_exit() and lwp_exit() to perform any actions
1050  * needed when the thread/LWP leaves the processor for the last time. This
1051  * routine is not intended to deal with freeing memory; freectx() is used for
1052  * that purpose during thread_free(). This routine is provided to allow for
1053  * clean-up that can't wait until thread_free().
1054  */
1055 void
1056 exitctx(kthread_t *t)
1057 {
1058 	struct ctxop *ctx;
1059 
1060 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1061 		if (ctx->exit_op != NULL)
1062 			(ctx->exit_op)(t);
1063 }
1064 
1065 /*
1066  * freectx is called from thread_free() and exec() to get
1067  * rid of old thread context ops.
1068  */
1069 void
1070 freectx(kthread_t *t, int isexec)
1071 {
1072 	struct ctxop *ctx;
1073 
1074 	while ((ctx = t->t_ctx) != NULL) {
1075 		t->t_ctx = ctx->next;
1076 		if (ctx->free_op != NULL)
1077 			(ctx->free_op)(ctx->arg, isexec);
1078 		kmem_free(ctx, sizeof (struct ctxop));
1079 	}
1080 }
1081 
1082 /*
1083  * Set the thread running; arrange for it to be swapped in if necessary.
1084  */
1085 void
1086 setrun_locked(kthread_t *t)
1087 {
1088 	ASSERT(THREAD_LOCK_HELD(t));
1089 	if (t->t_state == TS_SLEEP) {
1090 		/*
1091 		 * Take off sleep queue.
1092 		 */
1093 		SOBJ_UNSLEEP(t->t_sobj_ops, t);
1094 	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1095 		/*
1096 		 * Already on dispatcher queue.
1097 		 */
1098 		return;
1099 	} else if (t->t_state == TS_WAIT) {
1100 		waitq_setrun(t);
1101 	} else if (t->t_state == TS_STOPPED) {
1102 		/*
1103 		 * All of the sending of SIGCONT (TC_XSTART) and /proc
1104 		 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
1105 		 * requested that the thread be run.
1106 		 * Just calling setrun() is not sufficient to set a stopped
1107 		 * thread running.  TP_TXSTART is always set if the thread
1108 		 * is not stopped by a jobcontrol stop signal.
1109 		 * TP_TPSTART is always set if /proc is not controlling it.
1110 		 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
1111 		 * The thread won't be stopped unless one of these
1112 		 * three mechanisms did it.
1113 		 *
1114 		 * These flags must be set before calling setrun_locked(t).
1115 		 * They can't be passed as arguments because the streams
1116 		 * code calls setrun() indirectly and the mechanism for
1117 		 * doing so admits only one argument.  Note that the
1118 		 * thread must be locked in order to change t_schedflags.
1119 		 */
1120 		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1121 			return;
1122 		/*
1123 		 * Process is no longer stopped (a thread is running).
1124 		 */
1125 		t->t_whystop = 0;
1126 		t->t_whatstop = 0;
1127 		/*
1128 		 * Strictly speaking, we do not have to clear these
1129 		 * flags here; they are cleared on entry to stop().
1130 		 * However, they are confusing when doing kernel
1131 		 * debugging or when they are revealed by ps(1).
1132 		 */
1133 		t->t_schedflag &= ~TS_ALLSTART;
1134 		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
1135 		ASSERT(t->t_lockp == &transition_lock);
1136 		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1137 		/*
1138 		 * Let the class put the process on the dispatcher queue.
1139 		 */
1140 		CL_SETRUN(t);
1141 	}
1142 }
1143 
1144 void
1145 setrun(kthread_t *t)
1146 {
1147 	thread_lock(t);
1148 	setrun_locked(t);
1149 	thread_unlock(t);
1150 }
1151 
1152 /*
1153  * Unpin an interrupted thread.
1154  *	When an interrupt occurs, the interrupt is handled on the stack
1155  *	of an interrupt thread, taken from a pool linked to the CPU structure.
1156  *
1157  *	When swtch() is switching away from an interrupt thread because it
1158  *	blocked or was preempted, this routine is called to complete the
1159  *	saving of the interrupted thread state, and returns the interrupted
1160  *	thread pointer so it may be resumed.
1161  *
1162  *	Called by swtch() only at high spl.
1163  */
1164 kthread_t *
1165 thread_unpin()
1166 {
1167 	kthread_t	*t = curthread;	/* current thread */
1168 	kthread_t	*itp;		/* interrupted thread */
1169 	int		i;		/* interrupt level */
1170 	extern int	intr_passivate();
1171 
1172 	ASSERT(t->t_intr != NULL);
1173 
1174 	itp = t->t_intr;		/* interrupted thread */
1175 	t->t_intr = NULL;		/* clear interrupt ptr */
1176 
1177 	/*
1178 	 * Get state from interrupt thread for the one
1179 	 * it interrupted.
1180 	 */
1181 
1182 	i = intr_passivate(t, itp);
1183 
1184 	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1185 	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1186 	    i, t, t, itp, itp);
1187 
1188 	/*
1189 	 * Dissociate the current thread from the interrupted thread's LWP.
1190 	 */
1191 	t->t_lwp = NULL;
1192 
1193 	/*
1194 	 * Interrupt handlers above the level that spinlocks block must
1195 	 * not block.
1196 	 */
1197 #if DEBUG
1198 	if (i < 0 || i > LOCK_LEVEL)
1199 		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
1200 #endif
1201 
1202 	/*
1203 	 * Compute the CPU's base interrupt level based on the active
1204 	 * interrupts.
1205 	 */
1206 	ASSERT(CPU->cpu_intr_actv & (1 << i));
1207 	set_base_spl();
1208 
1209 	return (itp);
1210 }
1211 
1212 /*
1213  * Create and initialize an interrupt thread.
1214  *	Cannot fail; thread_create() blocks for memory as needed.
1215  *	Called at spl7() or better.
1216  */
1217 void
1218 thread_create_intr(struct cpu *cp)
1219 {
1220 	kthread_t *tp;
1221 
1222 	tp = thread_create(NULL, 0,
1223 	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1224 
1225 	/*
1226 	 * Set the thread in the TS_FREE state.  The state will change
1227 	 * to TS_ONPROC only while the interrupt is active.  Think of these
1228 	 * as being on a private free list for the CPU.  Being TS_FREE keeps
1229 	 * inactive interrupt threads out of debugger thread lists.
1230 	 *
1231 	 * We cannot call thread_create with TS_FREE because of the current
1232 	 * checks there for ONPROC.  Fix this when thread_create takes flags.
1233 	 */
1234 	THREAD_FREEINTR(tp, cp);
1235 
1236 	/*
1237 	 * Nobody should ever reference the credentials of an interrupt
1238 	 * thread so make it NULL to catch any such references.
1239 	 */
1240 	tp->t_cred = NULL;
1241 	tp->t_flag |= T_INTR_THREAD;
1242 	tp->t_cpu = cp;
1243 	tp->t_bound_cpu = cp;
1244 	tp->t_disp_queue = cp->cpu_disp;
1245 	tp->t_affinitycnt = 1;
1246 	tp->t_preempt = 1;
1247 
1248 	/*
1249 	 * Don't make a user-requested binding on this thread so that
1250 	 * the processor can be offlined.
1251 	 */
1252 	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
1253 	tp->t_bind_pset = PS_NONE;
1254 
1255 #if defined(__i386) || defined(__amd64)
1256 	tp->t_stk -= STACK_ALIGN;
1257 	*(tp->t_stk) = 0;		/* terminate intr thread stack */
1258 #endif
1259 
1260 	/*
1261 	 * Link onto CPU's interrupt pool.
1262 	 */
1263 	tp->t_link = cp->cpu_intr_thread;
1264 	cp->cpu_intr_thread = tp;
1265 }
1266 
1267 /*
1268  * TSD -- THREAD SPECIFIC DATA
1269  */
1270 static kmutex_t		tsd_mutex;	 /* linked list spin lock */
1271 static uint_t		tsd_nkeys;	 /* size of destructor array */
1272 /* per-key destructor funcs */
1273 static void 		(**tsd_destructor)(void *);
1274 /* list of tsd_thread's */
1275 static struct tsd_thread	*tsd_list;
1276 
1277 /*
1278  * Default destructor
1279  *	Needed because NULL destructor means that the key is unused
1280  */
1281 /* ARGSUSED */
1282 void
1283 tsd_defaultdestructor(void *value)
1284 {}
1285 
1286 /*
1287  * Create a key (index into per thread array)
1288  *	Locks out tsd_create, tsd_destroy, and tsd_exit
1289  *	May allocate memory with lock held
1290  */
1291 void
1292 tsd_create(uint_t *keyp, void (*destructor)(void *))
1293 {
1294 	int	i;
1295 	uint_t	nkeys;
1296 
1297 	/*
1298 	 * if key is allocated, do nothing
1299 	 */
1300 	mutex_enter(&tsd_mutex);
1301 	if (*keyp) {
1302 		mutex_exit(&tsd_mutex);
1303 		return;
1304 	}
1305 	/*
1306 	 * find an unused key
1307 	 */
1308 	if (destructor == NULL)
1309 		destructor = tsd_defaultdestructor;
1310 
1311 	for (i = 0; i < tsd_nkeys; ++i)
1312 		if (tsd_destructor[i] == NULL)
1313 			break;
1314 
1315 	/*
1316 	 * if no unused keys, increase the size of the destructor array
1317 	 */
1318 	if (i == tsd_nkeys) {
1319 		if ((nkeys = (tsd_nkeys << 1)) == 0)
1320 			nkeys = 1;
1321 		tsd_destructor =
1322 		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1323 		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1324 		    (size_t)(nkeys * sizeof (void (*)(void *))));
1325 		tsd_nkeys = nkeys;
1326 	}
1327 
1328 	/*
1329 	 * allocate the next available unused key
1330 	 */
1331 	tsd_destructor[i] = destructor;
1332 	*keyp = i + 1;
1333 	mutex_exit(&tsd_mutex);
1334 }
1335 
1336 /*
1337  * Destroy a key -- this is for unloadable modules
1338  *
1339  * Assumes that the caller is preventing tsd_set and tsd_get
1340  * Locks out tsd_create, tsd_destroy, and tsd_exit
1341  * May free memory with lock held
1342  */
1343 void
1344 tsd_destroy(uint_t *keyp)
1345 {
1346 	uint_t key;
1347 	struct tsd_thread *tsd;
1348 
1349 	/*
1350 	 * protect the key namespace and our destructor lists
1351 	 */
1352 	mutex_enter(&tsd_mutex);
1353 	key = *keyp;
1354 	*keyp = 0;
1355 
1356 	ASSERT(key <= tsd_nkeys);
1357 
1358 	/*
1359 	 * if the key is valid
1360 	 */
1361 	if (key != 0) {
1362 		uint_t k = key - 1;
1363 		/*
1364 		 * for every thread with TSD, call key's destructor
1365 		 */
1366 		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1367 			/*
1368 			 * no TSD for key in this thread
1369 			 */
1370 			if (key > tsd->ts_nkeys)
1371 				continue;
1372 			/*
1373 			 * call destructor for key
1374 			 */
1375 			if (tsd->ts_value[k] && tsd_destructor[k])
1376 				(*tsd_destructor[k])(tsd->ts_value[k]);
1377 			/*
1378 			 * reset value for key
1379 			 */
1380 			tsd->ts_value[k] = NULL;
1381 		}
1382 		/*
1383 		 * actually free the key (NULL destructor == unused)
1384 		 */
1385 		tsd_destructor[k] = NULL;
1386 	}
1387 
1388 	mutex_exit(&tsd_mutex);
1389 }
1390 
1391 /*
1392  * Quickly return the per thread value that was stored with the specified key
1393  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1394  */
1395 void *
1396 tsd_get(uint_t key)
1397 {
1398 	return (tsd_agent_get(curthread, key));
1399 }
1400 
1401 /*
1402  * Set a per thread value indexed with the specified key
1403  */
1404 int
1405 tsd_set(uint_t key, void *value)
1406 {
1407 	return (tsd_agent_set(curthread, key, value));
1408 }
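
/*
 * Editor's illustrative sketch (not part of the original source): typical
 * module use of the TSD interfaces -- create a key at load time, attach a
 * per-thread value lazily, and destroy the key (running the destructor for
 * every thread) at unload time.  The xx_* names are hypothetical.
 */
#if 0
static uint_t xx_tsd_key;

static void
xx_tsd_dtor(void *value)
{
	kmem_free(value, sizeof (uint64_t));
}

void
xx_mod_init(void)
{
	tsd_create(&xx_tsd_key, xx_tsd_dtor);	/* no-op if already created */
}

void
xx_mod_fini(void)
{
	tsd_destroy(&xx_tsd_key);	/* runs xx_tsd_dtor for every thread */
}

static uint64_t *
xx_my_counter(void)
{
	uint64_t *cp = tsd_get(xx_tsd_key);

	if (cp == NULL) {
		cp = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		(void) tsd_set(xx_tsd_key, cp);
	}
	return (cp);
}
#endif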
1409 
1410 /*
1411  * Like tsd_get(), except that the agent lwp can get the tsd of
1412  * another thread in the same process (the agent thread only runs when the
1413  * process is completely stopped by /proc), or syslwp is creating a new lwp.
1414  */
1415 void *
1416 tsd_agent_get(kthread_t *t, uint_t key)
1417 {
1418 	struct tsd_thread *tsd = t->t_tsd;
1419 
1420 	ASSERT(t == curthread ||
1421 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1422 
1423 	if (key && tsd != NULL && key <= tsd->ts_nkeys)
1424 		return (tsd->ts_value[key - 1]);
1425 	return (NULL);
1426 }
1427 
1428 /*
1429  * Like tsd_set(), except that the agent lwp can set the tsd of
1430  * another thread in the same process, or syslwp can set the tsd
1431  * of a thread it's in the middle of creating.
1432  *
1433  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1434  * May lock out tsd_destroy (and tsd_create), may allocate memory with
1435  * lock held
1436  */
1437 int
1438 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1439 {
1440 	struct tsd_thread *tsd = t->t_tsd;
1441 
1442 	ASSERT(t == curthread ||
1443 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1444 
1445 	if (key == 0)
1446 		return (EINVAL);
1447 	if (tsd == NULL)
1448 		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1449 	if (key <= tsd->ts_nkeys) {
1450 		tsd->ts_value[key - 1] = value;
1451 		return (0);
1452 	}
1453 
1454 	ASSERT(key <= tsd_nkeys);
1455 
1456 	/*
1457 	 * lock out tsd_destroy()
1458 	 */
1459 	mutex_enter(&tsd_mutex);
1460 	if (tsd->ts_nkeys == 0) {
1461 		/*
1462 		 * Link onto list of threads with TSD
1463 		 */
1464 		if ((tsd->ts_next = tsd_list) != NULL)
1465 			tsd_list->ts_prev = tsd;
1466 		tsd_list = tsd;
1467 	}
1468 
1469 	/*
1470 	 * Allocate thread local storage and set the value for key
1471 	 */
1472 	tsd->ts_value = tsd_realloc(tsd->ts_value,
1473 	    tsd->ts_nkeys * sizeof (void *),
1474 	    key * sizeof (void *));
1475 	tsd->ts_nkeys = key;
1476 	tsd->ts_value[key - 1] = value;
1477 	mutex_exit(&tsd_mutex);
1478 
1479 	return (0);
1480 }
1481 
1482 
1483 /*
1484  * Return the per thread value that was stored with the specified key
1485  *	If necessary, create the key and the value
1486  *	Assumes the caller is protecting *keyp from tsd_destroy
1487  */
1488 void *
1489 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1490 {
1491 	void *value;
1492 	uint_t key = *keyp;
1493 	struct tsd_thread *tsd = curthread->t_tsd;
1494 
1495 	if (tsd == NULL)
1496 		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1497 	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1498 		return (value);
1499 	if (key == 0)
1500 		tsd_create(keyp, destroy);
1501 	(void) tsd_set(*keyp, value = (*allocate)());
1502 
1503 	return (value);
1504 }
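
/*
 * Editor's illustrative sketch (not part of the original source): the same
 * lazy per-thread-value pattern collapses into a single call with
 * tsd_getcreate(), which creates the key and allocates the value on first
 * use.  The yy_* names are hypothetical.
 */
#if 0
static uint_t yy_tsd_key;

static void *
yy_alloc(void)
{
	return (kmem_zalloc(sizeof (uint64_t), KM_SLEEP));
}

static void
yy_free(void *value)
{
	kmem_free(value, sizeof (uint64_t));
}

static uint64_t *
yy_my_counter(void)
{
	return (tsd_getcreate(&yy_tsd_key, yy_free, yy_alloc));
}
#endif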
1505 
1506 /*
1507  * Called from thread_exit() to run the destructor function for each tsd
1508  *	Locks out tsd_create and tsd_destroy
1509  *	Assumes that the destructor *DOES NOT* use tsd
1510  */
1511 void
1512 tsd_exit(void)
1513 {
1514 	int i;
1515 	struct tsd_thread *tsd = curthread->t_tsd;
1516 
1517 	if (tsd == NULL)
1518 		return;
1519 
1520 	if (tsd->ts_nkeys == 0) {
1521 		kmem_free(tsd, sizeof (*tsd));
1522 		curthread->t_tsd = NULL;
1523 		return;
1524 	}
1525 
1526 	/*
1527 	 * lock out tsd_create and tsd_destroy, call
1528 	 * the destructor, and mark the value as destroyed.
1529 	 */
1530 	mutex_enter(&tsd_mutex);
1531 
1532 	for (i = 0; i < tsd->ts_nkeys; i++) {
1533 		if (tsd->ts_value[i] && tsd_destructor[i])
1534 			(*tsd_destructor[i])(tsd->ts_value[i]);
1535 		tsd->ts_value[i] = NULL;
1536 	}
1537 
1538 	/*
1539 	 * remove from linked list of threads with TSD
1540 	 */
1541 	if (tsd->ts_next)
1542 		tsd->ts_next->ts_prev = tsd->ts_prev;
1543 	if (tsd->ts_prev)
1544 		tsd->ts_prev->ts_next = tsd->ts_next;
1545 	if (tsd_list == tsd)
1546 		tsd_list = tsd->ts_next;
1547 
1548 	mutex_exit(&tsd_mutex);
1549 
1550 	/*
1551 	 * free up the TSD
1552 	 */
1553 	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1554 	kmem_free(tsd, sizeof (struct tsd_thread));
1555 	curthread->t_tsd = NULL;
1556 }
1557 
1558 /*
1559  * realloc
1560  */
1561 static void *
1562 tsd_realloc(void *old, size_t osize, size_t nsize)
1563 {
1564 	void *new;
1565 
1566 	new = kmem_zalloc(nsize, KM_SLEEP);
1567 	if (old) {
1568 		bcopy(old, new, osize);
1569 		kmem_free(old, osize);
1570 	}
1571 	return (new);
1572 }
1573 
1574 /*
1575  * Check to see if an interrupt thread might be active at a given ipl.
1576  * If so return true.
1577  * We must be conservative--it is ok to give a false yes, but a false no
1578  * will cause disaster.  (But if the situation changes after we check it is
1579  * ok--the caller is trying to ensure that an interrupt routine has been
1580  * exited).
1581  * This is used when trying to remove an interrupt handler from an autovector
1582  * list in avintr.c.
1583  */
1584 int
1585 intr_active(struct cpu *cp, int level)
1586 {
1587 	if (level <= LOCK_LEVEL)
1588 		return (cp->cpu_thread != cp->cpu_dispthread);
1589 	else
1590 		return (CPU_ON_INTR(cp));
1591 }
1592 
1593 /*
1594  * Return non-zero if an interrupt is being serviced.
1595  */
1596 int
1597 servicing_interrupt()
1598 {
1599 	int onintr = 0;
1600 
1601 	/* Are we an interrupt thread */
1602 	if (curthread->t_flag & T_INTR_THREAD)
1603 		return (1);
1604 	/* Are we servicing a high level interrupt? */
1605 	if (CPU_ON_INTR(CPU)) {
1606 		kpreempt_disable();
1607 		onintr = CPU_ON_INTR(CPU);
1608 		kpreempt_enable();
1609 	}
1610 	return (onintr);
1611 }
1612 
1613 
1614 /*
1615  * Change the dispatch priority of a thread in the system.
1616  * Used when raising or lowering a thread's priority.
1617  * (E.g., priority inheritance)
1618  *
1619  * Since threads are queued according to their priority, we
1620  * must check the thread's state to determine whether it
1621  * is on a queue somewhere. If it is, we've got to:
1622  *
1623  *	o Dequeue the thread.
1624  *	o Change its effective priority.
1625  *	o Enqueue the thread.
1626  *
1627  * Assumptions: The thread whose priority we wish to change
1628  * must be locked before we call thread_change_(e)pri().
1629  * The thread_change(e)pri() function doesn't drop the thread
1630  * lock--that must be done by its caller.
1631  */
1632 void
1633 thread_change_epri(kthread_t *t, pri_t disp_pri)
1634 {
1635 	uint_t	state;
1636 
1637 	ASSERT(THREAD_LOCK_HELD(t));
1638 
1639 	/*
1640 	 * If the inherited priority hasn't actually changed,
1641 	 * just return.
1642 	 */
1643 	if (t->t_epri == disp_pri)
1644 		return;
1645 
1646 	state = t->t_state;
1647 
1648 	/*
1649 	 * If it's not on a queue, change the priority with
1650 	 * impunity.
1651 	 */
1652 	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1653 		t->t_epri = disp_pri;
1654 
1655 		if (state == TS_ONPROC) {
1656 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1657 
1658 			if (t == cp->cpu_dispthread)
1659 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1660 		}
1661 		return;
1662 	}
1663 
1664 	/*
1665 	 * It's either on a sleep queue or a run queue.
1666 	 */
1667 	if (state == TS_SLEEP) {
1668 		/*
1669 		 * Take the thread out of its sleep queue.
1670 		 * Change the inherited priority.
1671 		 * Re-enqueue the thread.
1672 		 * Each synchronization object exports a function
1673 		 * to do this in an appropriate manner.
1674 		 */
1675 		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1676 	} else if (state == TS_WAIT) {
1677 		/*
1678 		 * Re-enqueue a thread on the wait queue if its
1679 		 * effective priority needs to change.
1680 		 */
1681 		if (disp_pri != t->t_epri)
1682 			waitq_change_pri(t, disp_pri);
1683 	} else {
1684 		/*
1685 		 * The thread is on a run queue.
1686 		 * Note: setbackdq() may not put the thread
1687 		 * back on the same run queue where it originally
1688 		 * resided.
1689 		 */
1690 		(void) dispdeq(t);
1691 		t->t_epri = disp_pri;
1692 		setbackdq(t);
1693 	}
1694 }	/* end of thread_change_epri */
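
/*
 * Editor's illustrative sketch (not part of the original source): callers
 * must hold the thread lock across the call and drop it themselves, per the
 * comment above.  example_set_inherited_pri() is a hypothetical wrapper.
 */
#if 0
static void
example_set_inherited_pri(kthread_t *t, pri_t new_epri)
{
	thread_lock(t);
	thread_change_epri(t, new_epri);
	thread_unlock(t);
}
#endif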
1695 
1696 /*
1697  * Function: Change the t_pri field of a thread.
1698  * Side Effects: Adjust the thread ordering on a run queue
1699  *		 or sleep queue, if necessary.
1700  * Returns: 1 if the thread was on a run queue, else 0.
1701  */
1702 int
1703 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1704 {
1705 	uint_t	state;
1706 	int	on_rq = 0;
1707 
1708 	ASSERT(THREAD_LOCK_HELD(t));
1709 
1710 	state = t->t_state;
1711 	THREAD_WILLCHANGE_PRI(t, disp_pri);
1712 
1713 	/*
1714 	 * If it's not on a queue, change the priority with
1715 	 * impunity.
1716 	 */
1717 	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1718 		t->t_pri = disp_pri;
1719 
1720 		if (state == TS_ONPROC) {
1721 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1722 
1723 			if (t == cp->cpu_dispthread)
1724 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1725 		}
1726 		return (0);
1727 	}
1728 
1729 	/*
1730 	 * It's either on a sleep queue or a run queue.
1731 	 */
1732 	if (state == TS_SLEEP) {
1733 		/*
1734 		 * If the priority has changed, take the thread out of
1735 		 * its sleep queue and change the priority.
1736 		 * Re-enqueue the thread.
1737 		 * Each synchronization object exports a function
1738 		 * to do this in an appropriate manner.
1739 		 */
1740 		if (disp_pri != t->t_pri)
1741 			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1742 	} else if (state == TS_WAIT) {
1743 		/*
1744 		 * Re-enqueue a thread on the wait queue if its
1745 		 * priority needs to change.
1746 		 */
1747 		if (disp_pri != t->t_pri)
1748 			waitq_change_pri(t, disp_pri);
1749 	} else {
1750 		/*
1751 		 * The thread is on a run queue.
1752 		 * Note: setbackdq() may not put the thread
1753 		 * back on the same run queue where it originally
1754 		 * resided.
1755 		 *
1756 		 * We still requeue the thread even if the priority
1757 		 * is unchanged to preserve round-robin (and other)
1758 		 * effects between threads of the same priority.
1759 		 */
1760 		on_rq = dispdeq(t);
1761 		ASSERT(on_rq);
1762 		t->t_pri = disp_pri;
1763 		if (front) {
1764 			setfrontdq(t);
1765 		} else {
1766 			setbackdq(t);
1767 		}
1768 	}
1769 	return (on_rq);
1770 }
1771