xref: /titanic_44/usr/src/uts/common/disp/thread.c (revision 5151fb1220e0ceafdc172203863c73da4285c170)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/sysmacros.h>
31 #include <sys/signal.h>
32 #include <sys/stack.h>
33 #include <sys/pcb.h>
34 #include <sys/user.h>
35 #include <sys/systm.h>
36 #include <sys/sysinfo.h>
37 #include <sys/var.h>
38 #include <sys/errno.h>
39 #include <sys/cmn_err.h>
40 #include <sys/cred.h>
41 #include <sys/resource.h>
42 #include <sys/task.h>
43 #include <sys/project.h>
44 #include <sys/proc.h>
45 #include <sys/debug.h>
46 #include <sys/inline.h>
47 #include <sys/disp.h>
48 #include <sys/class.h>
49 #include <vm/seg_kmem.h>
50 #include <vm/seg_kp.h>
51 #include <sys/machlock.h>
52 #include <sys/kmem.h>
53 #include <sys/varargs.h>
54 #include <sys/turnstile.h>
55 #include <sys/poll.h>
56 #include <sys/vtrace.h>
57 #include <sys/callb.h>
58 #include <c2/audit.h>
59 #include <sys/tnf.h>
60 #include <sys/sobject.h>
61 #include <sys/cpupart.h>
62 #include <sys/pset.h>
63 #include <sys/door.h>
64 #include <sys/spl.h>
65 #include <sys/copyops.h>
66 #include <sys/rctl.h>
67 #include <sys/brand.h>
68 #include <sys/pool.h>
69 #include <sys/zone.h>
70 #include <sys/tsol/label.h>
71 #include <sys/tsol/tndb.h>
72 #include <sys/cpc_impl.h>
73 #include <sys/sdt.h>
74 #include <sys/reboot.h>
75 #include <sys/kdi.h>
76 #include <sys/waitq.h>
77 #include <sys/cpucaps.h>
78 
79 struct kmem_cache *thread_cache;	/* cache of free threads */
80 struct kmem_cache *lwp_cache;		/* cache of free lwps */
81 struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */
82 
83 /*
84  * allthreads is only for use by kmem_readers.  All kernel loops can use
85  * the current thread as a start/end point.
86  */
87 static kthread_t *allthreads = &t0;	/* circular list of all threads */
88 
89 static kcondvar_t reaper_cv;		/* synchronization var */
90 kthread_t	*thread_deathrow;	/* circular list of reapable threads */
91 kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
92 kmutex_t	reaplock;		/* protects lwp and thread deathrows */
93 kmutex_t	thread_free_lock;	/* protects clock from reaper */
94 int	thread_reapcnt = 0;		/* number of threads on deathrow */
95 int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
96 int	reaplimit = 16;			/* delay reaping until reaplimit */
97 
98 extern int nthread;
99 
100 id_t	syscid;				/* system scheduling class ID */
101 void	*segkp_thread;			/* cookie for segkp pool */
102 
103 int lwp_cache_sz = 32;
104 int t_cache_sz = 8;
105 static kt_did_t next_t_id = 1;
106 
107 /*
108  * Min/Max stack sizes for stack size parameters
109  */
110 #define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
111 #define	MIN_STKSIZE	DEFAULTSTKSZ
112 
113 /*
114  * default_stksize overrides lwp_default_stksize if it is set.
115  */
116 int	default_stksize;
117 int	lwp_default_stksize;
118 
119 static zone_key_t zone_thread_key;
120 
121 /*
122  * forward declarations for internal thread specific data (tsd)
123  */
124 static void *tsd_realloc(void *, size_t, size_t);
125 
126 /*ARGSUSED*/
127 static int
128 turnstile_constructor(void *buf, void *cdrarg, int kmflags)
129 {
130 	bzero(buf, sizeof (turnstile_t));
131 	return (0);
132 }
133 
134 /*ARGSUSED*/
135 static void
136 turnstile_destructor(void *buf, void *cdrarg)
137 {
138 	turnstile_t *ts = buf;
139 
140 	ASSERT(ts->ts_free == NULL);
141 	ASSERT(ts->ts_waiters == 0);
142 	ASSERT(ts->ts_inheritor == NULL);
143 	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
144 	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
145 }
146 
147 void
148 thread_init(void)
149 {
150 	kthread_t *tp;
151 	extern char sys_name[];
152 	extern void idle();
153 	struct cpu *cpu = CPU;
154 
155 	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
156 
157 #if defined(__i386) || defined(__amd64)
158 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
159 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
160 
161 	/*
162 	 * "struct _klwp" includes a "struct pcb", which includes a
163 	 * "struct fpu", which needs to be 16-byte aligned on amd64
164 	 * (and even on i386 for fxsave/fxrstor).
165 	 */
166 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
167 	    16, NULL, NULL, NULL, NULL, NULL, 0);
168 #else
169 	/*
170 	 * Allocate thread structures from static_arena.  This prevents
171 	 * issues where a thread tries to relocate its own thread
172 	 * structure and touches it after the mapping has been suspended.
173 	 */
174 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
175 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);
176 
177 	lwp_stk_cache_init();
178 
179 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
180 	    0, NULL, NULL, NULL, NULL, NULL, 0);
181 #endif
182 
183 	turnstile_cache = kmem_cache_create("turnstile_cache",
184 	    sizeof (turnstile_t), 0,
185 	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
186 
187 	label_init();
188 	cred_init();
189 
190 	/*
191 	 * Initialize various resource management facilities.
192 	 */
193 	rctl_init();
194 	cpucaps_init();
195 	/*
196 	 * zone_init() should be called before project_init() so that the
197 	 * project ID for the first project is initialized correctly.
198 	 */
199 	zone_init();
200 	project_init();
201 	brand_init();
202 	task_init();
203 	tcache_init();
204 	pool_init();
205 
206 	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
207 
208 	/*
209 	 * Originally, we had two parameters to set default stack
210 	 * size: one for lwp's (lwp_default_stksize), and one for
211 	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
212 	 * Now we have a third parameter that overrides both if it is
213 	 * set to a legal stack size, called default_stksize.
214 	 */
215 
216 	if (default_stksize == 0) {
217 		default_stksize = DEFAULTSTKSZ;
218 	} else if (default_stksize % PAGESIZE != 0 ||
219 	    default_stksize > MAX_STKSIZE ||
220 	    default_stksize < MIN_STKSIZE) {
221 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
222 		    (int)DEFAULTSTKSZ);
223 		default_stksize = DEFAULTSTKSZ;
224 	} else {
225 		lwp_default_stksize = default_stksize;
226 	}
227 
228 	if (lwp_default_stksize == 0) {
229 		lwp_default_stksize = default_stksize;
230 	} else if (lwp_default_stksize % PAGESIZE != 0 ||
231 	    lwp_default_stksize > MAX_STKSIZE ||
232 	    lwp_default_stksize < MIN_STKSIZE) {
233 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
234 		    default_stksize);
235 		lwp_default_stksize = default_stksize;
236 	}
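
	/*
	 * Editor's illustrative note (not part of the original source):
	 * default_stksize and lwp_default_stksize are ordinary kernel
	 * globals, so the checks above define what an /etc/system override
	 * must satisfy: a multiple of PAGESIZE between MIN_STKSIZE and
	 * MAX_STKSIZE.  On a platform where these are legal sizes, a
	 * hypothetical override might look like:
	 *
	 *	set default_stksize=0x8000
	 *	set lwp_default_stksize=0x6000
	 */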
237 
238 	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
239 	    lwp_default_stksize,
240 	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));
241 
242 	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
243 	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);
244 
245 	(void) getcid(sys_name, &syscid);
246 	curthread->t_cid = syscid;	/* current thread is t0 */
247 
248 	/*
249 	 * Set up the first CPU's idle thread.
250 	 * It runs whenever the CPU has nothing worthwhile to do.
251 	 */
252 	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
253 	cpu->cpu_idle_thread = tp;
254 	tp->t_preempt = 1;
255 	tp->t_disp_queue = cpu->cpu_disp;
256 	ASSERT(tp->t_disp_queue != NULL);
257 	tp->t_bound_cpu = cpu;
258 	tp->t_affinitycnt = 1;
259 
260 	/*
261 	 * Registering a thread in the callback table is usually
262 	 * done in the initialization code of the thread. In this
263 	 * case, we do it right after thread creation to avoid
264 	 * blocking the idle thread while it registers itself.  It also
265 	 * avoids the possibility of reregistration in case a CPU
266 	 * restarts its idle thread.
267 	 */
268 	CALLB_CPR_INIT_SAFE(tp, "idle");
269 
270 	/*
271 	 * Finish initializing the kernel memory allocator now that
272 	 * thread_create() is available.
273 	 */
274 	kmem_thread_init();
275 
276 	if (boothowto & RB_DEBUG)
277 		kdi_dvec_thravail();
278 }
279 
280 /*
281  * Create a thread.
282  *
283  * thread_create() blocks for memory if necessary.  It never fails.
284  *
285  * If stk is NULL, the thread is created at the base of the stack
286  * and cannot be swapped.
287  */
288 kthread_t *
289 thread_create(
290 	caddr_t	stk,
291 	size_t	stksize,
292 	void	(*proc)(),
293 	void	*arg,
294 	size_t	len,
295 	proc_t	 *pp,
296 	int	state,
297 	pri_t	pri)
298 {
299 	kthread_t *t;
300 	extern struct classfuncs sys_classfuncs;
301 	turnstile_t *ts;
302 
303 	/*
304 	 * Every thread keeps a turnstile around in case it needs to block.
305 	 * The only reason the turnstile is not simply part of the thread
306 	 * structure is that we may have to break the association whenever
307 	 * more than one thread blocks on a given synchronization object.
308 	 * From a memory-management standpoint, turnstiles are like the
309 	 * "attached mblks" that hang off dblks in the streams allocator.
310 	 */
311 	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
312 
313 	if (stk == NULL) {
314 		/*
315 		 * alloc both thread and stack in segkp chunk
316 		 */
317 
318 		if (stksize < default_stksize)
319 			stksize = default_stksize;
320 
321 		if (stksize == default_stksize) {
322 			stk = (caddr_t)segkp_cache_get(segkp_thread);
323 		} else {
324 			stksize = roundup(stksize, PAGESIZE);
325 			stk = (caddr_t)segkp_get(segkp, stksize,
326 			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
327 		}
328 
329 		ASSERT(stk != NULL);
330 
331 		/*
332 		 * The machine-dependent mutex code may require that
333 		 * thread pointers (since they may be used for mutex owner
334 		 * fields) be aligned on a particular boundary.
335 		 * PTR24_ALIGN is the size of the alignment quanta.
336 		 * XXX - assumes stack grows toward low addresses.
337 		 */
338 		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
339 			cmn_err(CE_PANIC, "thread_create: proposed stack size"
340 			    " too small to hold thread.");
341 #ifdef STACK_GROWTH_DOWN
342 		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
343 		stksize &= -PTR24_ALIGN;	/* make thread aligned */
344 		t = (kthread_t *)(stk + stksize);
345 		bzero(t, sizeof (kthread_t));
346 #ifdef	C2_AUDIT
347 		if (audit_active)
348 			audit_thread_create(t);
349 #endif
350 		t->t_stk = stk + stksize;
351 		t->t_stkbase = stk;
352 #else	/* stack grows to larger addresses */
353 		stksize -= SA(sizeof (kthread_t));
354 		t = (kthread_t *)(stk);
355 		bzero(t, sizeof (kthread_t));
356 		t->t_stk = stk + sizeof (kthread_t);
357 		t->t_stkbase = stk + stksize + sizeof (kthread_t);
358 #endif	/* STACK_GROWTH_DOWN */
359 		t->t_flag |= T_TALLOCSTK;
360 		t->t_swap = stk;
361 	} else {
362 		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
363 		bzero(t, sizeof (kthread_t));
364 		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
365 #ifdef	C2_AUDIT
366 		if (audit_active)
367 			audit_thread_create(t);
368 #endif
369 		/*
370 		 * Initialize t_stk to the kernel stack pointer to use
371 		 * upon entry to the kernel
372 		 */
373 #ifdef STACK_GROWTH_DOWN
374 		t->t_stk = stk + stksize;
375 		t->t_stkbase = stk;
376 #else
377 		t->t_stk = stk;			/* 3b2-like */
378 		t->t_stkbase = stk + stksize;
379 #endif /* STACK_GROWTH_DOWN */
380 	}
381 
382 	/* set default stack flag */
383 	if (stksize == lwp_default_stksize)
384 		t->t_flag |= T_DFLTSTK;
385 
386 	t->t_ts = ts;
387 
388 	/*
389 	 * p_cred could be NULL if thread_create() is called before cred_init()
390 	 * is called in main.
391 	 */
392 	mutex_enter(&pp->p_crlock);
393 	if (pp->p_cred)
394 		crhold(t->t_cred = pp->p_cred);
395 	mutex_exit(&pp->p_crlock);
396 	t->t_start = gethrestime_sec();
397 	t->t_startpc = proc;
398 	t->t_procp = pp;
399 	t->t_clfuncs = &sys_classfuncs.thread;
400 	t->t_cid = syscid;
401 	t->t_pri = pri;
402 	t->t_stime = lbolt;
403 	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
404 	t->t_bind_cpu = PBIND_NONE;
405 	t->t_bind_pset = PS_NONE;
406 	t->t_plockp = &pp->p_lock;
407 	t->t_copyops = NULL;
408 	t->t_taskq = NULL;
409 	t->t_anttime = 0;
410 	t->t_hatdepth = 0;
411 
412 	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */
413 
414 	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
415 #ifndef NPROBE
416 	/* Kernel probe */
417 	tnf_thread_create(t);
418 #endif /* NPROBE */
419 	LOCK_INIT_CLEAR(&t->t_lock);
420 
421 	/*
422 	 * Callers who give us a NULL proc must do their own
423 	 * stack initialization.  e.g. lwp_create()
424 	 */
425 	if (proc != NULL) {
426 		t->t_stk = thread_stk_init(t->t_stk);
427 		thread_load(t, proc, arg, len);
428 	}
429 
430 	/*
431 	 * Put a hold on project0. If this thread is actually in a
432 	 * different project, then t_proj will be changed later in
433 	 * lwp_create().  All kernel-only threads must be in project 0.
434 	 */
435 	t->t_proj = project_hold(proj0p);
436 
437 	lgrp_affinity_init(&t->t_lgrp_affinity);
438 
439 	mutex_enter(&pidlock);
440 	nthread++;
441 	t->t_did = next_t_id++;
442 	t->t_prev = curthread->t_prev;
443 	t->t_next = curthread;
444 
445 	/*
446 	 * Add the thread to the list of all threads, and initialize
447 	 * its t_cpu pointer.  We need to block preemption since
448 	 * cpu_offline walks the thread list looking for threads
449 	 * with t_cpu pointing to the CPU being offlined.  We want
450 	 * to make sure that the list is consistent and that if t_cpu
451 	 * is set, the thread is on the list.
452 	 */
453 	kpreempt_disable();
454 	curthread->t_prev->t_next = t;
455 	curthread->t_prev = t;
456 
457 	/*
458 	 * Threads should never have a NULL t_cpu pointer so assign it
459 	 * here.  If the thread is being created with state TS_RUN, a
460 	 * better CPU may be chosen when it is placed on the run queue.
461 	 *
462 	 * We need to keep kernel preemption disabled when setting all
463 	 * three fields to keep them in sync.  Also, always create in
464 	 * the default partition since that's where kernel threads go
465 	 * (if this isn't a kernel thread, t_cpupart will be changed
466 	 * in lwp_create before setting the thread runnable).
467 	 */
468 	t->t_cpupart = &cp_default;
469 
470 	/*
471 	 * For now, affiliate this thread with the root lgroup.
472 	 * Since the kernel does not (presently) allocate its memory
473 	 * in a locality aware fashion, the root is an appropriate home.
474 	 * If this thread is later associated with an lwp, it will have
475 	 * its lgroup re-assigned at that time.
476 	 */
477 	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
478 
479 	/*
480 	 * Inherit the current cpu.  If this cpu isn't part of the chosen
481 	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
482 	 * is ready to run.
483 	 */
484 	if (CPU->cpu_part == &cp_default)
485 		t->t_cpu = CPU;
486 	else
487 		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
488 		    t->t_pri, NULL);
489 
490 	t->t_disp_queue = t->t_cpu->cpu_disp;
491 	kpreempt_enable();
492 
493 	/*
494 	 * Initialize thread state and the dispatcher lock pointer.
495 	 * Need to hold onto pidlock to block allthreads walkers until
496 	 * the state is set.
497 	 */
498 	switch (state) {
499 	case TS_RUN:
500 		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
501 		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
502 		CL_SETRUN(t);
503 		thread_unlock(t);
504 		break;
505 
506 	case TS_ONPROC:
507 		THREAD_ONPROC(t, t->t_cpu);
508 		break;
509 
510 	case TS_FREE:
511 		/*
512 		 * Free state will be used for intr threads.
513 		 * The interrupt routine must set the thread dispatcher
514 		 * lock pointer (t_lockp) if starting on a CPU
515 		 * other than the current one.
516 		 */
517 		THREAD_FREEINTR(t, CPU);
518 		break;
519 
520 	case TS_STOPPED:
521 		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
522 		break;
523 
524 	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
525 		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
526 	}
527 	mutex_exit(&pidlock);
528 	return (t);
529 }
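
/*
 * Editor's usage sketch (not part of the original source).  A typical
 * kernel-only caller passes a NULL stack so that thread_create() carves
 * both the stack and the kthread_t out of a single segkp chunk, and
 * starts the thread runnable at a system priority.  my_worker and
 * my_arg are hypothetical names used only for illustration:
 *
 *	static void
 *	my_worker(void *my_arg)
 *	{
 *		... do the work ...
 *		thread_exit();
 *	}
 *
 *	kthread_t *t = thread_create(NULL, 0, (void (*)())my_worker,
 *	    my_arg, 0, &p0, TS_RUN, minclsyspri);
 */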
530 
531 /*
532  * Move thread to project0 and take care of project reference counters.
533  */
534 void
535 thread_rele(kthread_t *t)
536 {
537 	kproject_t *kpj;
538 
539 	thread_lock(t);
540 
541 	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
542 	kpj = ttoproj(t);
543 	t->t_proj = proj0p;
544 
545 	thread_unlock(t);
546 
547 	if (kpj != proj0p) {
548 		project_rele(kpj);
549 		(void) project_hold(proj0p);
550 	}
551 }
552 
553 
554 void	(*ip_cleanup_func)(void);
555 
556 void
557 thread_exit()
558 {
559 	kthread_t *t = curthread;
560 
561 	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
562 		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
563 
564 	if (ip_cleanup_func != NULL)
565 		(*ip_cleanup_func)();
566 
567 	tsd_exit();		/* Clean up this thread's TSD */
568 
569 	kcpc_passivate();	/* clean up performance counter state */
570 
571 	/*
572 	 * No kernel thread should have called poll() without arranging for
573 	 * pollcleanup() to be called here.
574 	 */
575 	ASSERT(t->t_pollstate == NULL);
576 	ASSERT(t->t_schedctl == NULL);
577 	if (t->t_door)
578 		door_slam();	/* in case thread did an upcall */
579 
580 #ifndef NPROBE
581 	/* Kernel probe */
582 	if (t->t_tnf_tpdp)
583 		tnf_thread_exit();
584 #endif /* NPROBE */
585 
586 	thread_rele(t);
587 	t->t_preempt++;
588 
589 	/*
590 	 * remove thread from the all threads list so that
591 	 * death-row can use the same pointers.
592 	 */
593 	mutex_enter(&pidlock);
594 	t->t_next->t_prev = t->t_prev;
595 	t->t_prev->t_next = t->t_next;
596 	ASSERT(allthreads != t);	/* t0 never exits */
597 	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
598 	mutex_exit(&pidlock);
599 
600 	if (t->t_ctx != NULL)
601 		exitctx(t);
602 	if (t->t_procp->p_pctx != NULL)
603 		exitpctx(t->t_procp);
604 
605 	t->t_state = TS_ZOMB;	/* set zombie thread */
606 
607 	swtch_from_zombie();	/* give up the CPU */
608 	/* NOTREACHED */
609 }
610 
611 /*
612  * Check to see if the specified thread is active (defined as being on
613  * the thread list).  This is certainly a slow way to do this; if there's
614  * ever a reason to speed it up, we could maintain a hash table of active
615  * threads indexed by their t_did.
616  */
617 static kthread_t *
618 did_to_thread(kt_did_t tid)
619 {
620 	kthread_t *t;
621 
622 	ASSERT(MUTEX_HELD(&pidlock));
623 	for (t = curthread->t_next; t != curthread; t = t->t_next) {
624 		if (t->t_did == tid)
625 			break;
626 	}
627 	if (t->t_did == tid)
628 		return (t);
629 	else
630 		return (NULL);
631 }
632 
633 /*
634  * Wait for specified thread to exit.  Returns immediately if the thread
635  * could not be found, meaning that it has either already exited or never
636  * existed.
637  */
638 void
639 thread_join(kt_did_t tid)
640 {
641 	kthread_t *t;
642 
643 	ASSERT(tid != curthread->t_did);
644 	ASSERT(tid != t0.t_did);
645 
646 	mutex_enter(&pidlock);
647 	/*
648 	 * Make sure we check that the thread is on the thread list
649 	 * before blocking on it; otherwise we could end up blocking on
650 	 * a cv that's already been freed.  In other words, don't cache
651 	 * the thread pointer across calls to cv_wait.
652 	 *
653 	 * The choice of loop invariant means that whenever a thread
654 	 * is taken off the allthreads list, a cv_broadcast must be
655 	 * performed on that thread's t_joincv to wake up any waiters.
656 	 * The broadcast doesn't have to happen right away, but it
657 	 * shouldn't be postponed indefinitely (e.g., by doing it in
658 	 * thread_free which may only be executed when the deathrow
659 	 * queue is processed).
660 	 */
661 	while (t = did_to_thread(tid))
662 		cv_wait(&t->t_joincv, &pidlock);
663 	mutex_exit(&pidlock);
664 }
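
/*
 * Editor's usage sketch (not part of the original source).  thread_join()
 * takes a t_did rather than a kthread_t pointer, so a creator that wants
 * to wait must capture the ID while the pointer is still known to be
 * valid, i.e. before the new thread can exit and be reaped.  The names
 * are hypothetical, and the sketch assumes my_worker cannot exit before
 * t_did has been read (e.g. because it first blocks on something the
 * creator owns):
 *
 *	kthread_t *t = thread_create(NULL, 0, (void (*)())my_worker,
 *	    NULL, 0, &p0, TS_RUN, minclsyspri);
 *	kt_did_t did = t->t_did;
 *	...
 *	thread_join(did);	returns once my_worker calls thread_exit()
 */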
665 
666 void
667 thread_free(kthread_t *t)
668 {
669 	ASSERT(t != &t0 && t->t_state == TS_FREE);
670 	ASSERT(t->t_door == NULL);
671 	ASSERT(t->t_schedctl == NULL);
672 	ASSERT(t->t_pollstate == NULL);
673 
674 	t->t_pri = 0;
675 	t->t_pc = 0;
676 	t->t_sp = 0;
677 	t->t_wchan0 = NULL;
678 	t->t_wchan = NULL;
679 	if (t->t_cred != NULL) {
680 		crfree(t->t_cred);
681 		t->t_cred = 0;
682 	}
683 	if (t->t_pdmsg) {
684 		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
685 		t->t_pdmsg = NULL;
686 	}
687 #ifdef	C2_AUDIT
688 	if (audit_active)
689 		audit_thread_free(t);
690 #endif
691 #ifndef NPROBE
692 	if (t->t_tnf_tpdp)
693 		tnf_thread_free(t);
694 #endif /* NPROBE */
695 	if (t->t_cldata) {
696 		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
697 	}
698 	if (t->t_rprof != NULL) {
699 		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
700 		t->t_rprof = NULL;
701 	}
702 	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
703 	if (t->t_lwp)
704 		lwp_freeregs(t->t_lwp, 0);
705 	if (t->t_ctx)
706 		freectx(t, 0);
707 	t->t_stk = NULL;
708 	if (t->t_lwp)
709 		lwp_stk_fini(t->t_lwp);
710 	lock_clear(&t->t_lock);
711 
712 	if (t->t_ts->ts_waiters > 0)
713 		panic("thread_free: turnstile still active");
714 
715 	kmem_cache_free(turnstile_cache, t->t_ts);
716 
717 	free_afd(&t->t_activefd);
718 
719 	/*
720 	 * Barrier for clock thread.  The clock holds this lock to
721 	 * keep the thread from going away while it's looking at it.
722 	 */
723 	mutex_enter(&thread_free_lock);
724 	mutex_exit(&thread_free_lock);
725 
726 	ASSERT(ttoproj(t) == proj0p);
727 	project_rele(ttoproj(t));
728 
729 	lgrp_affinity_free(&t->t_lgrp_affinity);
730 
731 	/*
732 	 * Free thread struct and its stack.
733 	 */
734 	if (t->t_flag & T_TALLOCSTK) {
735 		/* thread struct is embedded in stack */
736 		segkp_release(segkp, t->t_swap);
737 		mutex_enter(&pidlock);
738 		nthread--;
739 		mutex_exit(&pidlock);
740 	} else {
741 		if (t->t_swap) {
742 			segkp_release(segkp, t->t_swap);
743 			t->t_swap = NULL;
744 		}
745 		if (t->t_lwp) {
746 			kmem_cache_free(lwp_cache, t->t_lwp);
747 			t->t_lwp = NULL;
748 		}
749 		mutex_enter(&pidlock);
750 		nthread--;
751 		mutex_exit(&pidlock);
752 		kmem_cache_free(thread_cache, t);
753 	}
754 }
755 
756 /*
757  * Removes threads associated with the given zone from a deathrow queue.
758  * tp is a pointer to the head of the deathrow queue, and countp is a
759  * pointer to the current deathrow count.  Returns a linked list of
760  * threads removed from the list.
761  */
762 static kthread_t *
763 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
764 {
765 	kthread_t *tmp, *list = NULL;
766 	cred_t *cr;
767 
768 	ASSERT(MUTEX_HELD(&reaplock));
769 	while (*tp != NULL) {
770 		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
771 			tmp = *tp;
772 			*tp = tmp->t_forw;
773 			tmp->t_forw = list;
774 			list = tmp;
775 			(*countp)--;
776 		} else {
777 			tp = &(*tp)->t_forw;
778 		}
779 	}
780 	return (list);
781 }
782 
783 static void
784 thread_reap_list(kthread_t *t)
785 {
786 	kthread_t *next;
787 
788 	while (t != NULL) {
789 		next = t->t_forw;
790 		thread_free(t);
791 		t = next;
792 	}
793 }
794 
795 /* ARGSUSED */
796 static void
797 thread_zone_destroy(zoneid_t zoneid, void *unused)
798 {
799 	kthread_t *t, *l;
800 
801 	mutex_enter(&reaplock);
802 	/*
803 	 * Pull threads and lwps associated with zone off deathrow lists.
804 	 */
805 	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
806 	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
807 	mutex_exit(&reaplock);
808 
809 	/*
810 	 * Reap threads
811 	 */
812 	thread_reap_list(t);
813 
814 	/*
815 	 * Reap lwps
816 	 */
817 	thread_reap_list(l);
818 }
819 
820 /*
821  * cleanup zombie threads that are on deathrow.
822  */
823 void
824 thread_reaper()
825 {
826 	kthread_t *t, *l;
827 	callb_cpr_t cprinfo;
828 
829 	/*
830 	 * Register callback to clean up threads when zone is destroyed.
831 	 */
832 	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
833 
834 	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
835 	for (;;) {
836 		mutex_enter(&reaplock);
837 		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
838 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
839 			cv_wait(&reaper_cv, &reaplock);
840 			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
841 		}
842 		t = thread_deathrow;
843 		l = lwp_deathrow;
844 		thread_deathrow = NULL;
845 		lwp_deathrow = NULL;
846 		thread_reapcnt = 0;
847 		lwp_reapcnt = 0;
848 		mutex_exit(&reaplock);
849 
850 		/*
851 		 * Reap threads
852 		 */
853 		thread_reap_list(t);
854 
855 		/*
856 		 * Reap lwps
857 		 */
858 		thread_reap_list(l);
859 	}
860 }
861 
862 /*
863  * This is called by resume() to put a zombie thread onto deathrow.
864  * The thread's state is changed to TS_FREE to indicate that it is reapable.
865  * This is called from the idle thread so it must not block (just spin).
866  */
867 void
868 reapq_add(kthread_t *t)
869 {
870 	mutex_enter(&reaplock);
871 
872 	/*
873 	 * lwp_deathrow contains only threads with lwp linkage
874 	 * that are of the default stacksize. Anything else goes
875 	 * on thread_deathrow.
876 	 */
877 	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
878 		t->t_forw = lwp_deathrow;
879 		lwp_deathrow = t;
880 		lwp_reapcnt++;
881 	} else {
882 		t->t_forw = thread_deathrow;
883 		thread_deathrow = t;
884 		thread_reapcnt++;
885 	}
886 	if (lwp_reapcnt + thread_reapcnt > reaplimit)
887 		cv_signal(&reaper_cv);	/* wake the reaper */
888 	t->t_state = TS_FREE;
889 	lock_clear(&t->t_lock);
890 	mutex_exit(&reaplock);
891 }
892 
893 /*
894  * Install thread context ops for the current thread.
895  */
896 void
897 installctx(
898 	kthread_t *t,
899 	void	*arg,
900 	void	(*save)(void *),
901 	void	(*restore)(void *),
902 	void	(*fork)(void *, void *),
903 	void	(*lwp_create)(void *, void *),
904 	void	(*exit)(void *),
905 	void	(*free)(void *, int))
906 {
907 	struct ctxop *ctx;
908 
909 	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
910 	ctx->save_op = save;
911 	ctx->restore_op = restore;
912 	ctx->fork_op = fork;
913 	ctx->lwp_create_op = lwp_create;
914 	ctx->exit_op = exit;
915 	ctx->free_op = free;
916 	ctx->arg = arg;
917 	ctx->next = t->t_ctx;
918 	t->t_ctx = ctx;
919 }
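
/*
 * Editor's usage sketch (not part of the original source).  A subsystem
 * that keeps per-thread hardware or software state can have it saved and
 * restored around context switches by installing ops on curthread.
 * my_state, my_save and my_restore are hypothetical:
 *
 *	static void my_save(void *arg)    { ... stash *arg somewhere ... }
 *	static void my_restore(void *arg) { ... reload it ... }
 *
 *	installctx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, NULL);
 *	...
 *	(void) removectx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, NULL);
 *
 * removectx() must be passed the same argument and ops so that it can
 * find the matching ctxop entry to unlink and free.
 */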
920 
921 /*
922  * Remove the thread context ops from a thread.
923  */
924 int
925 removectx(
926 	kthread_t *t,
927 	void	*arg,
928 	void	(*save)(void *),
929 	void	(*restore)(void *),
930 	void	(*fork)(void *, void *),
931 	void	(*lwp_create)(void *, void *),
932 	void	(*exit)(void *),
933 	void	(*free)(void *, int))
934 {
935 	struct ctxop *ctx, *prev_ctx;
936 
937 	/*
938 	 * The incoming kthread_t (which is the thread for which the
939 	 * context ops will be removed) should be one of the following:
940 	 *
941 	 * a) the current thread,
942 	 *
943 	 * b) a thread of a process that's being forked (SIDL),
944 	 *
945 	 * c) a thread that belongs to the same process as the current
946 	 *    thread and for which the current thread is the agent thread,
947 	 *
948 	 * d) a thread that is TS_STOPPED, which (if curthread is not an
949 	 *    agent) indicates a thread being created as part of an lwp
950 	 *    creation.
951 	 */
952 	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
953 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
954 
955 	/*
956 	 * Serialize modifications to t->t_ctx to prevent the agent thread
957 	 * and the target thread from racing with each other during lwp exit.
958 	 */
959 	mutex_enter(&t->t_ctx_lock);
960 	prev_ctx = NULL;
961 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
962 		if (ctx->save_op == save && ctx->restore_op == restore &&
963 		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
964 		    ctx->exit_op == exit && ctx->free_op == free &&
965 		    ctx->arg == arg) {
966 			if (prev_ctx)
967 				prev_ctx->next = ctx->next;
968 			else
969 				t->t_ctx = ctx->next;
970 			mutex_exit(&t->t_ctx_lock);
971 			if (ctx->free_op != NULL)
972 				(ctx->free_op)(ctx->arg, 0);
973 			kmem_free(ctx, sizeof (struct ctxop));
974 			return (1);
975 		}
976 		prev_ctx = ctx;
977 	}
978 	mutex_exit(&t->t_ctx_lock);
979 
980 	return (0);
981 }
982 
983 void
984 savectx(kthread_t *t)
985 {
986 	struct ctxop *ctx;
987 
988 	ASSERT(t == curthread);
989 	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
990 		if (ctx->save_op != NULL)
991 			(ctx->save_op)(ctx->arg);
992 }
993 
994 void
995 restorectx(kthread_t *t)
996 {
997 	struct ctxop *ctx;
998 
999 	ASSERT(t == curthread);
1000 	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
1001 		if (ctx->restore_op != NULL)
1002 			(ctx->restore_op)(ctx->arg);
1003 }
1004 
1005 void
1006 forkctx(kthread_t *t, kthread_t *ct)
1007 {
1008 	struct ctxop *ctx;
1009 
1010 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1011 		if (ctx->fork_op != NULL)
1012 			(ctx->fork_op)(t, ct);
1013 }
1014 
1015 /*
1016  * Note that this operator is only invoked via the _lwp_create
1017  * system call.  The system may have other reasons to create lwps,
1018  * e.g. the agent lwp or the doors unreferenced lwp.
1019  */
1020 void
1021 lwp_createctx(kthread_t *t, kthread_t *ct)
1022 {
1023 	struct ctxop *ctx;
1024 
1025 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1026 		if (ctx->lwp_create_op != NULL)
1027 			(ctx->lwp_create_op)(t, ct);
1028 }
1029 
1030 /*
1031  * exitctx is called from thread_exit() and lwp_exit() to perform any actions
1032  * needed when the thread/LWP leaves the processor for the last time. This
1033  * routine is not intended to deal with freeing memory; freectx() is used for
1034  * that purpose during thread_free(). This routine is provided to allow for
1035  * clean-up that can't wait until thread_free().
1036  */
1037 void
1038 exitctx(kthread_t *t)
1039 {
1040 	struct ctxop *ctx;
1041 
1042 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1043 		if (ctx->exit_op != NULL)
1044 			(ctx->exit_op)(t);
1045 }
1046 
1047 /*
1048  * freectx is called from thread_free() and exec() to get
1049  * rid of old thread context ops.
1050  */
1051 void
1052 freectx(kthread_t *t, int isexec)
1053 {
1054 	struct ctxop *ctx;
1055 
1056 	while ((ctx = t->t_ctx) != NULL) {
1057 		t->t_ctx = ctx->next;
1058 		if (ctx->free_op != NULL)
1059 			(ctx->free_op)(ctx->arg, isexec);
1060 		kmem_free(ctx, sizeof (struct ctxop));
1061 	}
1062 }
1063 
1064 /*
1065  * Set the thread running; arrange for it to be swapped in if necessary.
1066  */
1067 void
1068 setrun_locked(kthread_t *t)
1069 {
1070 	ASSERT(THREAD_LOCK_HELD(t));
1071 	if (t->t_state == TS_SLEEP) {
1072 		/*
1073 		 * Take off sleep queue.
1074 		 */
1075 		SOBJ_UNSLEEP(t->t_sobj_ops, t);
1076 	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1077 		/*
1078 		 * Already on dispatcher queue.
1079 		 */
1080 		return;
1081 	} else if (t->t_state == TS_WAIT) {
1082 		waitq_setrun(t);
1083 	} else if (t->t_state == TS_STOPPED) {
1084 		/*
1085 		 * All of the sending of SIGCONT (TS_XSTART) and /proc
1086 		 * (TS_PSTART) and lwp_continue() (TS_CSTART) must have
1087 		 * requested that the thread be run.
1088 		 * Just calling setrun() is not sufficient to set a stopped
1089 		 * thread running.  TS_XSTART is always set if the thread
1090 		 * is not stopped by a jobcontrol stop signal.
1091 		 * TS_PSTART is always set if /proc is not controlling it.
1092 		 * TS_CSTART is always set if lwp_suspend() didn't stop it.
1093 		 * The thread won't be stopped unless one of these
1094 		 * three mechanisms did it.
1095 		 *
1096 		 * These flags must be set before calling setrun_locked(t).
1097 		 * They can't be passed as arguments because the streams
1098 		 * code calls setrun() indirectly and the mechanism for
1099 		 * doing so admits only one argument.  Note that the
1100 		 * thread must be locked in order to change t_schedflags.
1101 		 */
1102 		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1103 			return;
1104 		/*
1105 		 * Process is no longer stopped (a thread is running).
1106 		 */
1107 		t->t_whystop = 0;
1108 		t->t_whatstop = 0;
1109 		/*
1110 		 * Strictly speaking, we do not have to clear these
1111 		 * flags here; they are cleared on entry to stop().
1112 		 * However, they are confusing when doing kernel
1113 		 * debugging or when they are revealed by ps(1).
1114 		 */
1115 		t->t_schedflag &= ~TS_ALLSTART;
1116 		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
1117 		ASSERT(t->t_lockp == &transition_lock);
1118 		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1119 		/*
1120 		 * Let the class put the process on the dispatcher queue.
1121 		 */
1122 		CL_SETRUN(t);
1123 	}
1124 }
1125 
1126 void
1127 setrun(kthread_t *t)
1128 {
1129 	thread_lock(t);
1130 	setrun_locked(t);
1131 	thread_unlock(t);
1132 }
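
/*
 * Editor's illustrative sketch (not part of the original source).  As the
 * comment in setrun_locked() notes, a stopped thread only starts running
 * once all of the TS_ALLSTART bits are present in t_schedflag, so a
 * continue path conceptually does (this mirrors what lwp_continue() does;
 * it is a sketch, not the actual implementation):
 *
 *	thread_lock(t);
 *	t->t_schedflag |= TS_CSTART;	the reason for this stop is gone
 *	setrun_locked(t);		runs only if no other stop remains
 *	thread_unlock(t);
 */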
1133 
1134 /*
1135  * Unpin an interrupted thread.
1136  *	When an interrupt occurs, the interrupt is handled on the stack
1137  *	of an interrupt thread, taken from a pool linked to the CPU structure.
1138  *
1139  *	When swtch() is switching away from an interrupt thread because it
1140  *	blocked or was preempted, this routine is called to complete the
1141  *	saving of the interrupted thread state, and returns the interrupted
1142  *	thread pointer so it may be resumed.
1143  *
1144  *	Called by swtch() only at high spl.
1145  */
1146 kthread_t *
1147 thread_unpin()
1148 {
1149 	kthread_t	*t = curthread;	/* current thread */
1150 	kthread_t	*itp;		/* interrupted thread */
1151 	int		i;		/* interrupt level */
1152 	extern int	intr_passivate();
1153 
1154 	ASSERT(t->t_intr != NULL);
1155 
1156 	itp = t->t_intr;		/* interrupted thread */
1157 	t->t_intr = NULL;		/* clear interrupt ptr */
1158 
1159 	/*
1160 	 * Get state from interrupt thread for the one
1161 	 * it interrupted.
1162 	 */
1163 
1164 	i = intr_passivate(t, itp);
1165 
1166 	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1167 		"intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1168 		i, t, t, itp, itp);
1169 
1170 	/*
1171 	 * Dissociate the current thread from the interrupted thread's LWP.
1172 	 */
1173 	t->t_lwp = NULL;
1174 
1175 	/*
1176 	 * Interrupt handlers above the level that spinlocks block must
1177 	 * not block.
1178 	 */
1179 #if DEBUG
1180 	if (i < 0 || i > LOCK_LEVEL)
1181 		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
1182 #endif
1183 
1184 	/*
1185 	 * Compute the CPU's base interrupt level based on the active
1186 	 * interrupts.
1187 	 */
1188 	ASSERT(CPU->cpu_intr_actv & (1 << i));
1189 	set_base_spl();
1190 
1191 	return (itp);
1192 }
1193 
1194 /*
1195  * Create and initialize an interrupt thread.
1196  *	The new thread is linked onto the CPU's interrupt thread pool.
1197  *	Called at spl7() or better.
1198  */
1199 void
1200 thread_create_intr(struct cpu *cp)
1201 {
1202 	kthread_t *tp;
1203 
1204 	tp = thread_create(NULL, 0,
1205 	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1206 
1207 	/*
1208 	 * Set the thread in the TS_FREE state.  The state will change
1209 	 * to TS_ONPROC only while the interrupt is active.  Think of these
1210 	 * as being on a private free list for the CPU.  Being TS_FREE keeps
1211 	 * inactive interrupt threads out of debugger thread lists.
1212 	 *
1213 	 * We cannot call thread_create with TS_FREE because of the current
1214 	 * checks there for ONPROC.  Fix this when thread_create takes flags.
1215 	 */
1216 	THREAD_FREEINTR(tp, cp);
1217 
1218 	/*
1219 	 * Nobody should ever reference the credentials of an interrupt
1220 	 * thread so make it NULL to catch any such references.
1221 	 */
1222 	tp->t_cred = NULL;
1223 	tp->t_flag |= T_INTR_THREAD;
1224 	tp->t_cpu = cp;
1225 	tp->t_bound_cpu = cp;
1226 	tp->t_disp_queue = cp->cpu_disp;
1227 	tp->t_affinitycnt = 1;
1228 	tp->t_preempt = 1;
1229 
1230 	/*
1231 	 * Don't make a user-requested binding on this thread so that
1232 	 * the processor can be offlined.
1233 	 */
1234 	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
1235 	tp->t_bind_pset = PS_NONE;
1236 
1237 #if defined(__i386) || defined(__amd64)
1238 	tp->t_stk -= STACK_ALIGN;
1239 	*(tp->t_stk) = 0;		/* terminate intr thread stack */
1240 #endif
1241 
1242 	/*
1243 	 * Link onto CPU's interrupt pool.
1244 	 */
1245 	tp->t_link = cp->cpu_intr_thread;
1246 	cp->cpu_intr_thread = tp;
1247 }
1248 
1249 /*
1250  * TSD -- THREAD SPECIFIC DATA
1251  */
1252 static kmutex_t		tsd_mutex;	 /* linked list spin lock */
1253 static uint_t		tsd_nkeys;	 /* size of destructor array */
1254 /* per-key destructor funcs */
1255 static void 		(**tsd_destructor)(void *);
1256 /* list of tsd_thread's */
1257 static struct tsd_thread	*tsd_list;
1258 
1259 /*
1260  * Default destructor
1261  *	Needed because NULL destructor means that the key is unused
1262  */
1263 /* ARGSUSED */
1264 void
1265 tsd_defaultdestructor(void *value)
1266 {}
1267 
1268 /*
1269  * Create a key (index into per thread array)
1270  *	Locks out tsd_create, tsd_destroy, and tsd_exit
1271  *	May allocate memory with lock held
1272  */
1273 void
1274 tsd_create(uint_t *keyp, void (*destructor)(void *))
1275 {
1276 	int	i;
1277 	uint_t	nkeys;
1278 
1279 	/*
1280 	 * if key is allocated, do nothing
1281 	 */
1282 	mutex_enter(&tsd_mutex);
1283 	if (*keyp) {
1284 		mutex_exit(&tsd_mutex);
1285 		return;
1286 	}
1287 	/*
1288 	 * find an unused key
1289 	 */
1290 	if (destructor == NULL)
1291 		destructor = tsd_defaultdestructor;
1292 
1293 	for (i = 0; i < tsd_nkeys; ++i)
1294 		if (tsd_destructor[i] == NULL)
1295 			break;
1296 
1297 	/*
1298 	 * if no unused keys, increase the size of the destructor array
1299 	 */
1300 	if (i == tsd_nkeys) {
1301 		if ((nkeys = (tsd_nkeys << 1)) == 0)
1302 			nkeys = 1;
1303 		tsd_destructor =
1304 		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1305 		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1306 		    (size_t)(nkeys * sizeof (void (*)(void *))));
1307 		tsd_nkeys = nkeys;
1308 	}
1309 
1310 	/*
1311 	 * allocate the next available unused key
1312 	 */
1313 	tsd_destructor[i] = destructor;
1314 	*keyp = i + 1;
1315 	mutex_exit(&tsd_mutex);
1316 }
1317 
1318 /*
1319  * Destroy a key -- this is for unloadable modules
1320  *
1321  * Assumes that the caller is preventing tsd_set and tsd_get
1322  * Locks out tsd_create, tsd_destroy, and tsd_exit
1323  * May free memory with lock held
1324  */
1325 void
1326 tsd_destroy(uint_t *keyp)
1327 {
1328 	uint_t key;
1329 	struct tsd_thread *tsd;
1330 
1331 	/*
1332 	 * protect the key namespace and our destructor lists
1333 	 */
1334 	mutex_enter(&tsd_mutex);
1335 	key = *keyp;
1336 	*keyp = 0;
1337 
1338 	ASSERT(key <= tsd_nkeys);
1339 
1340 	/*
1341 	 * if the key is valid
1342 	 */
1343 	if (key != 0) {
1344 		uint_t k = key - 1;
1345 		/*
1346 		 * for every thread with TSD, call key's destructor
1347 		 */
1348 		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1349 			/*
1350 			 * no TSD for key in this thread
1351 			 */
1352 			if (key > tsd->ts_nkeys)
1353 				continue;
1354 			/*
1355 			 * call destructor for key
1356 			 */
1357 			if (tsd->ts_value[k] && tsd_destructor[k])
1358 				(*tsd_destructor[k])(tsd->ts_value[k]);
1359 			/*
1360 			 * reset value for key
1361 			 */
1362 			tsd->ts_value[k] = NULL;
1363 		}
1364 		/*
1365 		 * actually free the key (NULL destructor == unused)
1366 		 */
1367 		tsd_destructor[k] = NULL;
1368 	}
1369 
1370 	mutex_exit(&tsd_mutex);
1371 }
1372 
1373 /*
1374  * Quickly return the per thread value that was stored with the specified key
1375  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1376  */
1377 void *
1378 tsd_get(uint_t key)
1379 {
1380 	return (tsd_agent_get(curthread, key));
1381 }
1382 
1383 /*
1384  * Set a per thread value indexed with the specified key
1385  */
1386 int
1387 tsd_set(uint_t key, void *value)
1388 {
1389 	return (tsd_agent_set(curthread, key, value));
1390 }
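
/*
 * Editor's usage sketch (not part of the original source).  A subsystem
 * normally allocates one key for its lifetime and hangs one pointer per
 * thread off it; my_key, my_data_t and my_destructor are hypothetical:
 *
 *	static uint_t my_key;
 *
 *	tsd_create(&my_key, my_destructor);	once, e.g. at module load
 *
 *	my_data_t *d = tsd_get(my_key);
 *	if (d == NULL) {
 *		d = kmem_zalloc(sizeof (*d), KM_SLEEP);
 *		(void) tsd_set(my_key, d);
 *	}
 *	...
 *	tsd_destroy(&my_key);			at module unload
 */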
1391 
1392 /*
1393  * Like tsd_get(), except that the agent lwp can get the tsd of
1394  * another thread in the same process (the agent thread only runs when the
1395  * process is completely stopped by /proc), or syslwp is creating a new lwp.
1396  */
1397 void *
1398 tsd_agent_get(kthread_t *t, uint_t key)
1399 {
1400 	struct tsd_thread *tsd = t->t_tsd;
1401 
1402 	ASSERT(t == curthread ||
1403 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1404 
1405 	if (key && tsd != NULL && key <= tsd->ts_nkeys)
1406 		return (tsd->ts_value[key - 1]);
1407 	return (NULL);
1408 }
1409 
1410 /*
1411  * Like tsd_set(), except that the agent lwp can set the tsd of
1412  * another thread in the same process, or syslwp can set the tsd
1413  * of a thread it's in the middle of creating.
1414  *
1415  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1416  * May lock out tsd_destroy (and tsd_create), may allocate memory with
1417  * lock held
1418  */
1419 int
1420 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1421 {
1422 	struct tsd_thread *tsd = t->t_tsd;
1423 
1424 	ASSERT(t == curthread ||
1425 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1426 
1427 	if (key == 0)
1428 		return (EINVAL);
1429 	if (tsd == NULL)
1430 		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1431 	if (key <= tsd->ts_nkeys) {
1432 		tsd->ts_value[key - 1] = value;
1433 		return (0);
1434 	}
1435 
1436 	ASSERT(key <= tsd_nkeys);
1437 
1438 	/*
1439 	 * lock out tsd_destroy()
1440 	 */
1441 	mutex_enter(&tsd_mutex);
1442 	if (tsd->ts_nkeys == 0) {
1443 		/*
1444 		 * Link onto list of threads with TSD
1445 		 */
1446 		if ((tsd->ts_next = tsd_list) != NULL)
1447 			tsd_list->ts_prev = tsd;
1448 		tsd_list = tsd;
1449 	}
1450 
1451 	/*
1452 	 * Allocate thread local storage and set the value for key
1453 	 */
1454 	tsd->ts_value = tsd_realloc(tsd->ts_value,
1455 	    tsd->ts_nkeys * sizeof (void *),
1456 	    key * sizeof (void *));
1457 	tsd->ts_nkeys = key;
1458 	tsd->ts_value[key - 1] = value;
1459 	mutex_exit(&tsd_mutex);
1460 
1461 	return (0);
1462 }
1463 
1464 
1465 /*
1466  * Return the per thread value that was stored with the specified key
1467  *	If necessary, create the key and the value
1468  *	Assumes the caller is protecting *keyp from tsd_destroy
1469  */
1470 void *
1471 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1472 {
1473 	void *value;
1474 	uint_t key = *keyp;
1475 	struct tsd_thread *tsd = curthread->t_tsd;
1476 
1477 	if (tsd == NULL)
1478 		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1479 	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1480 		return (value);
1481 	if (key == 0)
1482 		tsd_create(keyp, destroy);
1483 	(void) tsd_set(*keyp, value = (*allocate)());
1484 
1485 	return (value);
1486 }
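
/*
 * Editor's usage sketch (not part of the original source).  tsd_getcreate()
 * collapses the create-key/allocate-value pattern above into a single
 * call; my_key, my_free and my_alloc are hypothetical:
 *
 *	my_data_t *d = tsd_getcreate(&my_key, my_free, my_alloc);
 */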
1487 
1488 /*
1489  * Called from thread_exit() to run the destructor function for each tsd
1490  *	Locks out tsd_create and tsd_destroy
1491  *	Assumes that the destructor *DOES NOT* use tsd
1492  */
1493 void
1494 tsd_exit(void)
1495 {
1496 	int i;
1497 	struct tsd_thread *tsd = curthread->t_tsd;
1498 
1499 	if (tsd == NULL)
1500 		return;
1501 
1502 	if (tsd->ts_nkeys == 0) {
1503 		kmem_free(tsd, sizeof (*tsd));
1504 		curthread->t_tsd = NULL;
1505 		return;
1506 	}
1507 
1508 	/*
1509 	 * lock out tsd_create and tsd_destroy, call
1510 	 * the destructor, and mark the value as destroyed.
1511 	 */
1512 	mutex_enter(&tsd_mutex);
1513 
1514 	for (i = 0; i < tsd->ts_nkeys; i++) {
1515 		if (tsd->ts_value[i] && tsd_destructor[i])
1516 			(*tsd_destructor[i])(tsd->ts_value[i]);
1517 		tsd->ts_value[i] = NULL;
1518 	}
1519 
1520 	/*
1521 	 * remove from linked list of threads with TSD
1522 	 */
1523 	if (tsd->ts_next)
1524 		tsd->ts_next->ts_prev = tsd->ts_prev;
1525 	if (tsd->ts_prev)
1526 		tsd->ts_prev->ts_next = tsd->ts_next;
1527 	if (tsd_list == tsd)
1528 		tsd_list = tsd->ts_next;
1529 
1530 	mutex_exit(&tsd_mutex);
1531 
1532 	/*
1533 	 * free up the TSD
1534 	 */
1535 	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1536 	kmem_free(tsd, sizeof (struct tsd_thread));
1537 	curthread->t_tsd = NULL;
1538 }
1539 
1540 /*
1541  * realloc
1542  */
1543 static void *
1544 tsd_realloc(void *old, size_t osize, size_t nsize)
1545 {
1546 	void *new;
1547 
1548 	new = kmem_zalloc(nsize, KM_SLEEP);
1549 	if (old) {
1550 		bcopy(old, new, osize);
1551 		kmem_free(old, osize);
1552 	}
1553 	return (new);
1554 }
1555 
1556 /*
1557  * Check to see if an interrupt thread might be active at a given ipl.
1558  * If so return true.
1559  * We must be conservative--it is ok to give a false yes, but a false no
1560  * will cause disaster.  (But if the situation changes after we check it is
1561  * ok--the caller is trying to ensure that an interrupt routine has been
1562  * exited).
1563  * This is used when trying to remove an interrupt handler from an autovector
1564  * list in avintr.c.
1565  */
1566 int
1567 intr_active(struct cpu *cp, int level)
1568 {
1569 	if (level <= LOCK_LEVEL)
1570 		return (cp->cpu_thread != cp->cpu_dispthread);
1571 	else
1572 		return (CPU_ON_INTR(cp));
1573 }
1574 
1575 /*
1576  * Return non-zero if an interrupt is being serviced.
1577  */
1578 int
1579 servicing_interrupt()
1580 {
1581 	int onintr = 0;
1582 
1583 	/* Are we an interrupt thread */
1584 	if (curthread->t_flag & T_INTR_THREAD)
1585 		return (1);
1586 	/* Are we servicing a high level interrupt? */
1587 	if (CPU_ON_INTR(CPU)) {
1588 		kpreempt_disable();
1589 		onintr = CPU_ON_INTR(CPU);
1590 		kpreempt_enable();
1591 	}
1592 	return (onintr);
1593 }
1594 
1595 
1596 /*
1597  * Change the dispatch priority of a thread in the system.
1598  * Used when raising or lowering a thread's priority.
1599  * (E.g., priority inheritance)
1600  *
1601  * Since threads are queued according to their priority, we
1602  * must check the thread's state to determine whether it
1603  * is on a queue somewhere. If it is, we've got to:
1604  *
1605  *	o Dequeue the thread.
1606  *	o Change its effective priority.
1607  *	o Enqueue the thread.
1608  *
1609  * Assumptions: The thread whose priority we wish to change
1610  * must be locked before we call thread_change_(e)pri().
1611  * The thread_change_(e)pri() function doesn't drop the thread
1612  * lock--that must be done by its caller.
1613  */
1614 void
1615 thread_change_epri(kthread_t *t, pri_t disp_pri)
1616 {
1617 	uint_t	state;
1618 
1619 	ASSERT(THREAD_LOCK_HELD(t));
1620 
1621 	/*
1622 	 * If the inherited priority hasn't actually changed,
1623 	 * just return.
1624 	 */
1625 	if (t->t_epri == disp_pri)
1626 		return;
1627 
1628 	state = t->t_state;
1629 
1630 	/*
1631 	 * If it's not on a queue, change the priority with
1632 	 * impunity.
1633 	 */
1634 	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1635 		t->t_epri = disp_pri;
1636 
1637 		if (state == TS_ONPROC) {
1638 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1639 
1640 			if (t == cp->cpu_dispthread)
1641 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1642 		}
1643 		return;
1644 	}
1645 
1646 	/*
1647 	 * It's either on a sleep queue or a run queue.
1648 	 */
1649 	if (state == TS_SLEEP) {
1650 		/*
1651 		 * Take the thread out of its sleep queue.
1652 		 * Change the inherited priority.
1653 		 * Re-enqueue the thread.
1654 		 * Each synchronization object exports a function
1655 		 * to do this in an appropriate manner.
1656 		 */
1657 		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1658 	} else if (state == TS_WAIT) {
1659 		/*
1660 		 * Re-enqueue a thread on the wait queue if its
1661 		 * effective priority needs to change.
1662 		 */
1663 		if (disp_pri != t->t_epri)
1664 			waitq_change_pri(t, disp_pri);
1665 	} else {
1666 		/*
1667 		 * The thread is on a run queue.
1668 		 * Note: setbackdq() may not put the thread
1669 		 * back on the same run queue where it originally
1670 		 * resided.
1671 		 */
1672 		(void) dispdeq(t);
1673 		t->t_epri = disp_pri;
1674 		setbackdq(t);
1675 	}
1676 }	/* end of thread_change_epri */
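
/*
 * Editor's usage sketch (not part of the original source).  Per the
 * assumptions above, the caller holds the thread lock across the call
 * and drops it afterwards, e.g.:
 *
 *	thread_lock(t);
 *	(void) thread_change_pri(t, new_pri, 0);
 *	thread_unlock(t);
 */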
1677 
1678 /*
1679  * Function: Change the t_pri field of a thread.
1680  * Side Effects: Adjust the thread ordering on a run queue
1681  *		 or sleep queue, if necessary.
1682  * Returns: 1 if the thread was on a run queue, else 0.
1683  */
1684 int
1685 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1686 {
1687 	uint_t	state;
1688 	int	on_rq = 0;
1689 
1690 	ASSERT(THREAD_LOCK_HELD(t));
1691 
1692 	state = t->t_state;
1693 	THREAD_WILLCHANGE_PRI(t, disp_pri);
1694 
1695 	/*
1696 	 * If it's not on a queue, change the priority with
1697 	 * impunity.
1698 	 */
1699 	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1700 		t->t_pri = disp_pri;
1701 
1702 		if (state == TS_ONPROC) {
1703 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1704 
1705 			if (t == cp->cpu_dispthread)
1706 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1707 		}
1708 		return (0);
1709 	}
1710 
1711 	/*
1712 	 * It's either on a sleep queue or a run queue.
1713 	 */
1714 	if (state == TS_SLEEP) {
1715 		/*
1716 		 * If the priority has changed, take the thread out of
1717 		 * its sleep queue and change the priority.
1718 		 * Re-enqueue the thread.
1719 		 * Each synchronization object exports a function
1720 		 * to do this in an appropriate manner.
1721 		 */
1722 		if (disp_pri != t->t_pri)
1723 			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1724 	} else if (state == TS_WAIT) {
1725 		/*
1726 		 * Re-enqueue a thread on the wait queue if its
1727 		 * priority needs to change.
1728 		 */
1729 		if (disp_pri != t->t_pri)
1730 			waitq_change_pri(t, disp_pri);
1731 	} else {
1732 		/*
1733 		 * The thread is on a run queue.
1734 		 * Note: setbackdq() may not put the thread
1735 		 * back on the same run queue where it originally
1736 		 * resided.
1737 		 *
1738 		 * We still requeue the thread even if the priority
1739 		 * is unchanged to preserve round-robin (and other)
1740 		 * effects between threads of the same priority.
1741 		 */
1742 		on_rq = dispdeq(t);
1743 		ASSERT(on_rq);
1744 		t->t_pri = disp_pri;
1745 		if (front) {
1746 			setfrontdq(t);
1747 		} else {
1748 			setbackdq(t);
1749 		}
1750 	}
1751 	return (on_rq);
1752 }
1753