xref: /titanic_52/usr/src/uts/common/disp/thread.c (revision 88df2d76721d60b8b7cad14f9380446d06569f7c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/sysmacros.h>
31 #include <sys/signal.h>
32 #include <sys/stack.h>
33 #include <sys/pcb.h>
34 #include <sys/user.h>
35 #include <sys/systm.h>
36 #include <sys/sysinfo.h>
37 #include <sys/var.h>
38 #include <sys/errno.h>
39 #include <sys/cmn_err.h>
40 #include <sys/cred.h>
41 #include <sys/resource.h>
42 #include <sys/task.h>
43 #include <sys/project.h>
44 #include <sys/proc.h>
45 #include <sys/debug.h>
46 #include <sys/inline.h>
47 #include <sys/disp.h>
48 #include <sys/class.h>
49 #include <vm/seg_kmem.h>
50 #include <vm/seg_kp.h>
51 #include <sys/machlock.h>
52 #include <sys/kmem.h>
53 #include <sys/varargs.h>
54 #include <sys/turnstile.h>
55 #include <sys/poll.h>
56 #include <sys/vtrace.h>
57 #include <sys/callb.h>
58 #include <c2/audit.h>
59 #include <sys/tnf.h>
60 #include <sys/sobject.h>
61 #include <sys/cpupart.h>
62 #include <sys/pset.h>
63 #include <sys/door.h>
64 #include <sys/spl.h>
65 #include <sys/copyops.h>
66 #include <sys/rctl.h>
67 #include <sys/pool.h>
68 #include <sys/zone.h>
69 #include <sys/tsol/label.h>
70 #include <sys/tsol/tndb.h>
71 #include <sys/cpc_impl.h>
72 #include <sys/sdt.h>
73 #include <sys/reboot.h>
74 #include <sys/kdi.h>
75 
76 struct kmem_cache *thread_cache;	/* cache of free threads */
77 struct kmem_cache *lwp_cache;		/* cache of free lwps */
78 struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */
79 
80 /*
81  * allthreads is only for use by kmem_readers.  All kernel loops can use
82  * the current thread as a start/end point.
83  */
84 static kthread_t *allthreads = &t0;	/* circular list of all threads */
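
/*
 * Illustrative sketch (not part of the original source): any kernel loop
 * can walk every thread by starting and ending at the current thread, as
 * did_to_thread() below does.  pidlock must be held so the list cannot
 * change underneath the walker:
 *
 *	kthread_t *t = curthread;
 *
 *	mutex_enter(&pidlock);
 *	do {
 *		... examine t ...
 *	} while ((t = t->t_next) != curthread);
 *	mutex_exit(&pidlock);
 */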
85 
86 static kcondvar_t reaper_cv;		/* synchronization var */
87 kthread_t	*thread_deathrow;	/* list of reapable threads */
88 kthread_t	*lwp_deathrow;		/* list of reapable threads with lwps */
89 kmutex_t	reaplock;		/* protects lwp and thread deathrows */
90 kmutex_t	thread_free_lock;	/* protects clock from reaper */
91 int	thread_reapcnt = 0;		/* number of threads on deathrow */
92 int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
93 int	reaplimit = 16;			/* delay reaping until reaplimit */
94 
95 extern int nthread;
96 
97 id_t	syscid;				/* system scheduling class ID */
98 void	*segkp_thread;			/* cookie for segkp pool */
99 
100 int lwp_cache_sz = 32;
101 int t_cache_sz = 8;
102 static kt_did_t next_t_id = 1;
103 
104 /*
105  * Min/Max stack sizes for stack size parameters
106  */
107 #define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
108 #define	MIN_STKSIZE	DEFAULTSTKSZ
109 
110 /*
111  * default_stksize overrides lwp_default_stksize if it is set.
112  */
113 int	default_stksize;
114 int	lwp_default_stksize;
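
/*
 * Both are patchable globals.  As an illustration only (not part of the
 * original source), an administrator could raise the defaults from
 * /etc/system; thread_init() below rejects values that are not a multiple
 * of PAGESIZE or that fall outside MIN_STKSIZE..MAX_STKSIZE:
 *
 *	set default_stksize=0x6000
 *	set lwp_default_stksize=0x6000
 */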
115 
116 static zone_key_t zone_thread_key;
117 
118 /*
119  * forward declarations for internal thread specific data (tsd)
120  */
121 static void *tsd_realloc(void *, size_t, size_t);
122 
123 /*ARGSUSED*/
124 static int
125 turnstile_constructor(void *buf, void *cdrarg, int kmflags)
126 {
127 	bzero(buf, sizeof (turnstile_t));
128 	return (0);
129 }
130 
131 /*ARGSUSED*/
132 static void
133 turnstile_destructor(void *buf, void *cdrarg)
134 {
135 	turnstile_t *ts = buf;
136 
137 	ASSERT(ts->ts_free == NULL);
138 	ASSERT(ts->ts_waiters == 0);
139 	ASSERT(ts->ts_inheritor == NULL);
140 	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
141 	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
142 }
143 
144 void
145 thread_init(void)
146 {
147 	kthread_t *tp;
148 	extern char sys_name[];
149 	extern void idle();
150 	struct cpu *cpu = CPU;
151 
152 	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
153 
154 #if defined(__i386) || defined(__amd64)
155 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
156 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
157 
158 	/*
159 	 * "struct _klwp" includes a "struct pcb", which includes a
160 	 * "struct fpu", which needs to be 16-byte aligned on amd64
161 	 * (and even on i386 for fxsave/fxrstor).
162 	 */
163 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
164 	    16, NULL, NULL, NULL, NULL, NULL, 0);
165 #else
166 	/*
167 	 * Allocate thread structures from static_arena.  This prevents
168 	 * issues where a thread tries to relocate its own thread
169 	 * structure and touches it after the mapping has been suspended.
170 	 */
171 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
172 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);
173 
174 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
175 	    0, NULL, NULL, NULL, NULL, NULL, 0);
176 #endif
177 
178 	turnstile_cache = kmem_cache_create("turnstile_cache",
179 	    sizeof (turnstile_t), 0,
180 	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
181 
182 	label_init();
183 	cred_init();
184 
185 	rctl_init();
186 	project_init();
187 	zone_init();
188 	task_init();
189 	tcache_init();
190 	pool_init();
191 
192 	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
193 
194 	/*
195 	 * Originally, we had two parameters to set default stack
196 	 * size: one for lwp's (lwp_default_stksize), and one for
197 	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
198 	 * Now we have a third parameter that overrides both if it is
199 	 * set to a legal stack size, called default_stksize.
200 	 */
201 
202 	if (default_stksize == 0) {
203 		default_stksize = DEFAULTSTKSZ;
204 	} else if (default_stksize % PAGESIZE != 0 ||
205 	    default_stksize > MAX_STKSIZE ||
206 	    default_stksize < MIN_STKSIZE) {
207 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
208 		    (int)DEFAULTSTKSZ);
209 		default_stksize = DEFAULTSTKSZ;
210 	} else {
211 		lwp_default_stksize = default_stksize;
212 	}
213 
214 	if (lwp_default_stksize == 0) {
215 		lwp_default_stksize = default_stksize;
216 	} else if (lwp_default_stksize % PAGESIZE != 0 ||
217 	    lwp_default_stksize > MAX_STKSIZE ||
218 	    lwp_default_stksize < MIN_STKSIZE) {
219 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
220 		    default_stksize);
221 		lwp_default_stksize = default_stksize;
222 	}
223 
224 	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
225 	    lwp_default_stksize,
226 	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));
227 
228 	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
229 	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);
230 
231 	(void) getcid(sys_name, &syscid);
232 	curthread->t_cid = syscid;	/* current thread is t0 */
233 
234 	/*
235 	 * Set up the first CPU's idle thread.
236 	 * It runs whenever the CPU has nothing worthwhile to do.
237 	 */
238 	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
239 	cpu->cpu_idle_thread = tp;
240 	tp->t_preempt = 1;
241 	tp->t_disp_queue = cpu->cpu_disp;
242 	ASSERT(tp->t_disp_queue != NULL);
243 	tp->t_bound_cpu = cpu;
244 	tp->t_affinitycnt = 1;
245 
246 	/*
247 	 * Registering a thread in the callback table is usually
248 	 * done in the initialization code of the thread. In this
249 	 * case, we do it right after thread creation to avoid
250 	 * blocking idle thread while registering itself. It also
251 	 * avoids the possibility of reregistration in case a CPU
252 	 * restarts its idle thread.
253 	 */
254 	CALLB_CPR_INIT_SAFE(tp, "idle");
255 
256 	/*
257 	 * Finish initializing the kernel memory allocator now that
258 	 * thread_create() is available.
259 	 */
260 	kmem_thread_init();
261 
262 	if (boothowto & RB_DEBUG)
263 		kdi_dvec_thravail();
264 }
265 
266 /*
267  * Create a thread.
268  *
269  * thread_create() blocks for memory if necessary.  It never fails.
270  *
271  * If stk is NULL, the thread is created at the base of the stack
272  * and cannot be swapped.
273  */
274 kthread_t *
275 thread_create(
276 	caddr_t	stk,
277 	size_t	stksize,
278 	void	(*proc)(),
279 	void	*arg,
280 	size_t	len,
281 	proc_t	 *pp,
282 	int	state,
283 	pri_t	pri)
284 {
285 	kthread_t *t;
286 	extern struct classfuncs sys_classfuncs;
287 	turnstile_t *ts;
288 
289 	/*
290 	 * Every thread keeps a turnstile around in case it needs to block.
291 	 * The only reason the turnstile is not simply part of the thread
292 	 * structure is that we may have to break the association whenever
293 	 * more than one thread blocks on a given synchronization object.
294 	 * From a memory-management standpoint, turnstiles are like the
295 	 * "attached mblks" that hang off dblks in the streams allocator.
296 	 */
297 	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
298 
299 	if (stk == NULL) {
300 		/*
301 		 * alloc both thread and stack in segkp chunk
302 		 */
303 
304 		if (stksize < default_stksize)
305 			stksize = default_stksize;
306 
307 		if (stksize == default_stksize) {
308 			stk = (caddr_t)segkp_cache_get(segkp_thread);
309 		} else {
310 			stksize = roundup(stksize, PAGESIZE);
311 			stk = (caddr_t)segkp_get(segkp, stksize,
312 			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
313 		}
314 
315 		ASSERT(stk != NULL);
316 
317 		/*
318 		 * The machine-dependent mutex code may require that
319 		 * thread pointers (since they may be used for mutex owner
320 		 * fields) have certain alignment requirements.
321 		 * PTR24_ALIGN is the size of the alignment quanta.
322 		 * XXX - assumes stack grows toward low addresses.
323 		 */
324 		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
325 			cmn_err(CE_PANIC, "thread_create: proposed stack size"
326 			    " too small to hold thread.");
327 #ifdef STACK_GROWTH_DOWN
328 		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
329 		stksize &= -PTR24_ALIGN;	/* make thread aligned */
330 		t = (kthread_t *)(stk + stksize);
331 		bzero(t, sizeof (kthread_t));
332 #ifdef	C2_AUDIT
333 		if (audit_active)
334 			audit_thread_create(t);
335 #endif
336 		t->t_stk = stk + stksize;
337 		t->t_stkbase = stk;
338 #else	/* stack grows to larger addresses */
339 		stksize -= SA(sizeof (kthread_t));
340 		t = (kthread_t *)(stk);
341 		bzero(t, sizeof (kthread_t));
342 		t->t_stk = stk + sizeof (kthread_t);
343 		t->t_stkbase = stk + stksize + sizeof (kthread_t);
344 #endif	/* STACK_GROWTH_DOWN */
345 		t->t_flag |= T_TALLOCSTK;
346 		t->t_swap = stk;
347 	} else {
348 		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
349 		bzero(t, sizeof (kthread_t));
350 		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
351 #ifdef	C2_AUDIT
352 		if (audit_active)
353 			audit_thread_create(t);
354 #endif
355 		/*
356 		 * Initialize t_stk to the kernel stack pointer to use
357 		 * upon entry to the kernel
358 		 */
359 #ifdef STACK_GROWTH_DOWN
360 		t->t_stk = stk + stksize;
361 		t->t_stkbase = stk;
362 #else
363 		t->t_stk = stk;			/* 3b2-like */
364 		t->t_stkbase = stk + stksize;
365 #endif /* STACK_GROWTH_DOWN */
366 	}
367 
368 	/* set default stack flag */
369 	if (stksize == lwp_default_stksize)
370 		t->t_flag |= T_DFLTSTK;
371 
372 	t->t_ts = ts;
373 
374 	/*
375 	 * p_cred could be NULL if thread_create is called before cred_init
376 	 * is called in main.
377 	 */
378 	mutex_enter(&pp->p_crlock);
379 	if (pp->p_cred)
380 		crhold(t->t_cred = pp->p_cred);
381 	mutex_exit(&pp->p_crlock);
382 	t->t_start = gethrestime_sec();
383 	t->t_startpc = proc;
384 	t->t_procp = pp;
385 	t->t_clfuncs = &sys_classfuncs.thread;
386 	t->t_cid = syscid;
387 	t->t_pri = pri;
388 	t->t_stime = lbolt;
389 	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
390 	t->t_bind_cpu = PBIND_NONE;
391 	t->t_bind_pset = PS_NONE;
392 	t->t_plockp = &pp->p_lock;
393 	t->t_copyops = NULL;
394 	t->t_taskq = NULL;
395 	t->t_anttime = 0;
396 	t->t_hatdepth = 0;
397 
398 	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */
399 
400 	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
401 #ifndef NPROBE
402 	/* Kernel probe */
403 	tnf_thread_create(t);
404 #endif /* NPROBE */
405 	LOCK_INIT_CLEAR(&t->t_lock);
406 
407 	/*
408 	 * Callers who give us a NULL starting function (proc) must do
409 	 * their own stack initialization, e.g. lwp_create().
410 	 */
411 	if (proc != NULL) {
412 		t->t_stk = thread_stk_init(t->t_stk);
413 		thread_load(t, proc, arg, len);
414 	}
415 
416 	/*
417 	 * Put a hold on project0. If this thread is actually in a
418 	 * different project, then t_proj will be changed later in
419 	 * lwp_create().  All kernel-only threads must be in project 0.
420 	 */
421 	t->t_proj = project_hold(proj0p);
422 
423 	lgrp_affinity_init(&t->t_lgrp_affinity);
424 
425 	mutex_enter(&pidlock);
426 	nthread++;
427 	t->t_did = next_t_id++;
428 	t->t_prev = curthread->t_prev;
429 	t->t_next = curthread;
430 
431 	/*
432 	 * Add the thread to the list of all threads, and initialize
433 	 * its t_cpu pointer.  We need to block preemption since
434 	 * cpu_offline walks the thread list looking for threads
435 	 * with t_cpu pointing to the CPU being offlined.  We want
436 	 * to make sure that the list is consistent and that if t_cpu
437 	 * is set, the thread is on the list.
438 	 */
439 	kpreempt_disable();
440 	curthread->t_prev->t_next = t;
441 	curthread->t_prev = t;
442 
443 	/*
444 	 * Threads should never have a NULL t_cpu pointer so assign it
445 	 * here.  If the thread is being created with state TS_RUN a
446 	 * better CPU may be chosen when it is placed on the run queue.
447 	 *
448 	 * We need to keep kernel preemption disabled when setting all
449 	 * three fields to keep them in sync.  Also, always create in
450 	 * the default partition since that's where kernel threads go
451 	 * (if this isn't a kernel thread, t_cpupart will be changed
452 	 * in lwp_create before setting the thread runnable).
453 	 */
454 	t->t_cpupart = &cp_default;
455 
456 	/*
457 	 * For now, affiliate this thread with the root lgroup.
458 	 * Since the kernel does not (presently) allocate its memory
459 	 * in a locality aware fashion, the root is an appropriate home.
460 	 * If this thread is later associated with an lwp, it will have
461 	 * its lgroup re-assigned at that time.
462 	 */
463 	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
464 
465 	/*
466 	 * Inherit the current cpu.  If this cpu isn't part of the chosen
467 	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
468 	 * is ready to run.
469 	 */
470 	if (CPU->cpu_part == &cp_default)
471 		t->t_cpu = CPU;
472 	else
473 		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
474 		    t->t_pri, NULL);
475 
476 	t->t_disp_queue = t->t_cpu->cpu_disp;
477 	kpreempt_enable();
478 
479 	/*
480 	 * Initialize thread state and the dispatcher lock pointer.
481 	 * Need to hold onto pidlock to block allthreads walkers until
482 	 * the state is set.
483 	 */
484 	switch (state) {
485 	case TS_RUN:
486 		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
487 		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
488 		CL_SETRUN(t);
489 		thread_unlock(t);
490 		break;
491 
492 	case TS_ONPROC:
493 		THREAD_ONPROC(t, t->t_cpu);
494 		break;
495 
496 	case TS_FREE:
497 		/*
498 		 * Free state will be used for intr threads.
499 		 * The interrupt routine must set the thread dispatcher
500 		 * lock pointer (t_lockp) if starting on a CPU
501 		 * other than the current one.
502 		 */
503 		THREAD_FREEINTR(t, CPU);
504 		break;
505 
506 	case TS_STOPPED:
507 		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
508 		break;
509 
510 	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
511 		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
512 	}
513 	mutex_exit(&pidlock);
514 	return (t);
515 }
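
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * creating a kernel-only thread that runs until it calls thread_exit().
 * The worker function and the choice of minclsyspri (from <sys/disp.h>)
 * are hypothetical:
 *
 *	static void
 *	my_worker(void)
 *	{
 *		... do the work ...
 *		thread_exit();
 *	}
 *
 *	kthread_t *tp = thread_create(NULL, 0, my_worker, NULL, 0, &p0,
 *	    TS_RUN, minclsyspri);
 */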
516 
517 /*
518  * Move thread to project0 and take care of project reference counters.
519  */
520 void
521 thread_rele(kthread_t *t)
522 {
523 	kproject_t *kpj;
524 
525 	thread_lock(t);
526 
527 	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
528 	kpj = ttoproj(t);
529 	t->t_proj = proj0p;
530 
531 	thread_unlock(t);
532 
533 	if (kpj != proj0p) {
534 		project_rele(kpj);
535 		(void) project_hold(proj0p);
536 	}
537 }
538 
539 
540 void	(*ip_cleanup_func)(void);
541 
542 void
543 thread_exit()
544 {
545 	kthread_t *t = curthread;
546 
547 	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
548 		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
549 
550 	if (ip_cleanup_func != NULL)
551 		(*ip_cleanup_func)();
552 
553 	tsd_exit();		/* Clean up this thread's TSD */
554 
555 	kcpc_passivate();	/* clean up performance counter state */
556 
557 	/*
558 	 * No kernel thread should have called poll() without arranging
559 	 * for pollcleanup() to be called here.
560 	 */
561 	ASSERT(t->t_pollstate == NULL);
562 	ASSERT(t->t_schedctl == NULL);
563 	if (t->t_door)
564 		door_slam();	/* in case thread did an upcall */
565 
566 #ifndef NPROBE
567 	/* Kernel probe */
568 	if (t->t_tnf_tpdp)
569 		tnf_thread_exit();
570 #endif /* NPROBE */
571 
572 	thread_rele(t);
573 	t->t_preempt++;
574 
575 	/*
576 	 * remove thread from the all threads list so that
577 	 * death-row can use the same pointers.
578 	 */
579 	mutex_enter(&pidlock);
580 	t->t_next->t_prev = t->t_prev;
581 	t->t_prev->t_next = t->t_next;
582 	ASSERT(allthreads != t);	/* t0 never exits */
583 	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
584 	mutex_exit(&pidlock);
585 
586 	if (t->t_ctx != NULL)
587 		exitctx(t);
588 	if (t->t_procp->p_pctx != NULL)
589 		exitpctx(t->t_procp);
590 
591 	t->t_state = TS_ZOMB;	/* set zombie thread */
592 
593 	swtch_from_zombie();	/* give up the CPU */
594 	/* NOTREACHED */
595 }
596 
597 /*
598  * Check to see if the specified thread is active (defined as being on
599  * the thread list).  This is certainly a slow way to do this; if there's
600  * ever a reason to speed it up, we could maintain a hash table of active
601  * threads indexed by their t_did.
602  */
603 static kthread_t *
604 did_to_thread(kt_did_t tid)
605 {
606 	kthread_t *t;
607 
608 	ASSERT(MUTEX_HELD(&pidlock));
609 	for (t = curthread->t_next; t != curthread; t = t->t_next) {
610 		if (t->t_did == tid)
611 			break;
612 	}
613 	if (t->t_did == tid)
614 		return (t);
615 	else
616 		return (NULL);
617 }
618 
619 /*
620  * Wait for specified thread to exit.  Returns immediately if the thread
621  * could not be found, meaning that it has either already exited or never
622  * existed.
623  */
624 void
625 thread_join(kt_did_t tid)
626 {
627 	kthread_t *t;
628 
629 	ASSERT(tid != curthread->t_did);
630 	ASSERT(tid != t0.t_did);
631 
632 	mutex_enter(&pidlock);
633 	/*
634 	 * Make sure we check that the thread is on the thread list
635 	 * before blocking on it; otherwise we could end up blocking on
636 	 * a cv that's already been freed.  In other words, don't cache
637 	 * the thread pointer across calls to cv_wait.
638 	 *
639 	 * The choice of loop invariant means that whenever a thread
640 	 * is taken off the allthreads list, a cv_broadcast must be
641 	 * performed on that thread's t_joincv to wake up any waiters.
642 	 * The broadcast doesn't have to happen right away, but it
643 	 * shouldn't be postponed indefinitely (e.g., by doing it in
644 	 * thread_free which may only be executed when the deathrow
645 	 * queue is processed).
646 	 */
647 	while (t = did_to_thread(tid))
648 		cv_wait(&t->t_joincv, &pidlock);
649 	mutex_exit(&pidlock);
650 }
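
/*
 * Illustrative sketch (not part of the original source): because the new
 * thread may exit at any time after it is set running, callers typically
 * record its t_did right after thread_create() returns and join on the id
 * rather than on the (possibly freed) thread pointer.  my_worker is the
 * hypothetical function from the thread_create() sketch above:
 *
 *	kthread_t *tp = thread_create(NULL, 0, my_worker, NULL, 0, &p0,
 *	    TS_RUN, minclsyspri);
 *	kt_did_t tid = tp->t_did;
 *	...
 *	thread_join(tid);
 */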
651 
652 void
653 thread_free(kthread_t *t)
654 {
655 	ASSERT(t != &t0 && t->t_state == TS_FREE);
656 	ASSERT(t->t_door == NULL);
657 	ASSERT(t->t_schedctl == NULL);
658 	ASSERT(t->t_pollstate == NULL);
659 
660 	t->t_pri = 0;
661 	t->t_pc = 0;
662 	t->t_sp = 0;
663 	t->t_wchan0 = NULL;
664 	t->t_wchan = NULL;
665 	if (t->t_cred != NULL) {
666 		crfree(t->t_cred);
667 		t->t_cred = 0;
668 	}
669 	if (t->t_pdmsg) {
670 		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
671 		t->t_pdmsg = NULL;
672 	}
673 #ifdef	C2_AUDIT
674 	if (audit_active)
675 		audit_thread_free(t);
676 #endif
677 #ifndef NPROBE
678 	if (t->t_tnf_tpdp)
679 		tnf_thread_free(t);
680 #endif /* NPROBE */
681 	if (t->t_cldata) {
682 		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
683 	}
684 	if (t->t_rprof != NULL) {
685 		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
686 		t->t_rprof = NULL;
687 	}
688 	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
689 	if (t->t_lwp)
690 		lwp_freeregs(t->t_lwp, 0);
691 	if (t->t_ctx)
692 		freectx(t, 0);
693 	if (t->t_procp->p_pctx)
694 		freepctx(t->t_procp, 0);
695 	t->t_stk = NULL;
696 	if (t->t_lwp)
697 		lwp_stk_fini(t->t_lwp);
698 	lock_clear(&t->t_lock);
699 
700 	if (t->t_ts->ts_waiters > 0)
701 		panic("thread_free: turnstile still active");
702 
703 	kmem_cache_free(turnstile_cache, t->t_ts);
704 
705 	free_afd(&t->t_activefd);
706 
707 	/*
708 	 * Barrier for clock thread.  The clock holds this lock to
709 	 * keep the thread from going away while it's looking at it.
710 	 */
711 	mutex_enter(&thread_free_lock);
712 	mutex_exit(&thread_free_lock);
713 
714 	ASSERT(ttoproj(t) == proj0p);
715 	project_rele(ttoproj(t));
716 
717 	lgrp_affinity_free(&t->t_lgrp_affinity);
718 
719 	/*
720 	 * Free thread struct and its stack.
721 	 */
722 	if (t->t_flag & T_TALLOCSTK) {
723 		/* thread struct is embedded in stack */
724 		segkp_release(segkp, t->t_swap);
725 		mutex_enter(&pidlock);
726 		nthread--;
727 		mutex_exit(&pidlock);
728 	} else {
729 		if (t->t_swap) {
730 			segkp_release(segkp, t->t_swap);
731 			t->t_swap = NULL;
732 		}
733 		if (t->t_lwp) {
734 			kmem_cache_free(lwp_cache, t->t_lwp);
735 			t->t_lwp = NULL;
736 		}
737 		mutex_enter(&pidlock);
738 		nthread--;
739 		mutex_exit(&pidlock);
740 		kmem_cache_free(thread_cache, t);
741 	}
742 }
743 
744 /*
745  * Removes threads associated with the given zone from a deathrow queue.
746  * tp is a pointer to the head of the deathrow queue, and countp is a
747  * pointer to the current deathrow count.  Returns a linked list of
748  * threads removed from the list.
749  */
750 static kthread_t *
751 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
752 {
753 	kthread_t *tmp, *list = NULL;
754 	cred_t *cr;
755 
756 	ASSERT(MUTEX_HELD(&reaplock));
757 	while (*tp != NULL) {
758 		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
759 			tmp = *tp;
760 			*tp = tmp->t_forw;
761 			tmp->t_forw = list;
762 			list = tmp;
763 			(*countp)--;
764 		} else {
765 			tp = &(*tp)->t_forw;
766 		}
767 	}
768 	return (list);
769 }
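
/*
 * thread_zone_cleanup() above uses the pointer-to-pointer idiom: tp always
 * points at the link (the list head or some t_forw field) that refers to
 * the node being examined, so a matching node can be unlinked without
 * tracking a separate "prev" pointer.  A minimal generic sketch of the
 * same idiom (illustrative only; should_remove() is hypothetical):
 *
 *	kthread_t **tp = &head;
 *
 *	while (*tp != NULL) {
 *		if (should_remove(*tp))
 *			*tp = (*tp)->t_forw;
 *		else
 *			tp = &(*tp)->t_forw;
 *	}
 */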
770 
771 static void
772 thread_reap_list(kthread_t *t)
773 {
774 	kthread_t *next;
775 
776 	while (t != NULL) {
777 		next = t->t_forw;
778 		thread_free(t);
779 		t = next;
780 	}
781 }
782 
783 /* ARGSUSED */
784 static void
785 thread_zone_destroy(zoneid_t zoneid, void *unused)
786 {
787 	kthread_t *t, *l;
788 
789 	mutex_enter(&reaplock);
790 	/*
791 	 * Pull threads and lwps associated with zone off deathrow lists.
792 	 */
793 	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
794 	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
795 	mutex_exit(&reaplock);
796 
797 	/*
798 	 * Reap threads
799 	 */
800 	thread_reap_list(t);
801 
802 	/*
803 	 * Reap lwps
804 	 */
805 	thread_reap_list(l);
806 }
807 
808 /*
809  * cleanup zombie threads that are on deathrow.
810  */
811 void
812 thread_reaper()
813 {
814 	kthread_t *t, *l;
815 	callb_cpr_t cprinfo;
816 
817 	/*
818 	 * Register callback to clean up threads when zone is destroyed.
819 	 */
820 	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
821 
822 	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
823 	for (;;) {
824 		mutex_enter(&reaplock);
825 		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
826 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
827 			cv_wait(&reaper_cv, &reaplock);
828 			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
829 		}
830 		t = thread_deathrow;
831 		l = lwp_deathrow;
832 		thread_deathrow = NULL;
833 		lwp_deathrow = NULL;
834 		thread_reapcnt = 0;
835 		lwp_reapcnt = 0;
836 		mutex_exit(&reaplock);
837 
838 		/*
839 		 * Reap threads
840 		 */
841 		thread_reap_list(t);
842 
843 		/*
844 		 * Reap lwps
845 		 */
846 		thread_reap_list(l);
847 	}
848 }
849 
850 /*
851  * This is called by resume() to put a zombie thread onto deathrow.
852  * The thread's state is changed to TS_FREE to indicate that it is reapable.
853  * This is called from the idle thread so it must not block (just spin).
854  */
855 void
856 reapq_add(kthread_t *t)
857 {
858 	mutex_enter(&reaplock);
859 
860 	/*
861 	 * lwp_deathrow contains only threads with lwp linkage
862 	 * that are of the default stacksize. Anything else goes
863 	 * on thread_deathrow.
864 	 */
865 	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
866 		t->t_forw = lwp_deathrow;
867 		lwp_deathrow = t;
868 		lwp_reapcnt++;
869 	} else {
870 		t->t_forw = thread_deathrow;
871 		thread_deathrow = t;
872 		thread_reapcnt++;
873 	}
874 	if (lwp_reapcnt + thread_reapcnt > reaplimit)
875 		cv_signal(&reaper_cv);	/* wake the reaper */
876 	t->t_state = TS_FREE;
877 	lock_clear(&t->t_lock);
878 	mutex_exit(&reaplock);
879 }
880 
881 /*
882  * Install thread context ops for the current thread.
883  */
884 void
885 installctx(
886 	kthread_t *t,
887 	void	*arg,
888 	void	(*save)(void *),
889 	void	(*restore)(void *),
890 	void	(*fork)(void *, void *),
891 	void	(*lwp_create)(void *, void *),
892 	void	(*exit)(void *),
893 	void	(*free)(void *, int))
894 {
895 	struct ctxop *ctx;
896 
897 	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
898 	ctx->save_op = save;
899 	ctx->restore_op = restore;
900 	ctx->fork_op = fork;
901 	ctx->lwp_create_op = lwp_create;
902 	ctx->exit_op = exit;
903 	ctx->free_op = free;
904 	ctx->arg = arg;
905 	ctx->next = t->t_ctx;
906 	t->t_ctx = ctx;
907 }
908 
909 /*
910  * Remove thread context ops from the current thread.
911  * (Or allow the agent thread to remove thread context ops from another
912  * thread in the same, stopped, process)
913  */
914 int
915 removectx(
916 	kthread_t *t,
917 	void	*arg,
918 	void	(*save)(void *),
919 	void	(*restore)(void *),
920 	void	(*fork)(void *, void *),
921 	void	(*lwp_create)(void *, void *),
922 	void	(*exit)(void *),
923 	void	(*free)(void *, int))
924 {
925 	struct ctxop *ctx, *prev_ctx;
926 
927 	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
928 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
929 
930 	prev_ctx = NULL;
931 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
932 		if (ctx->save_op == save && ctx->restore_op == restore &&
933 		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
934 		    ctx->exit_op == exit && ctx->free_op == free &&
935 		    ctx->arg == arg) {
936 			if (prev_ctx)
937 				prev_ctx->next = ctx->next;
938 			else
939 				t->t_ctx = ctx->next;
940 			if (ctx->free_op != NULL)
941 				(ctx->free_op)(ctx->arg, 0);
942 			kmem_free(ctx, sizeof (struct ctxop));
943 			return (1);
944 		}
945 		prev_ctx = ctx;
946 	}
947 	return (0);
948 }
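
/*
 * Illustrative sketch (not part of the original source): a subsystem with
 * per-thread hardware state might register context ops on the current
 * thread and later tear them down.  The my_* callbacks and my_state are
 * hypothetical:
 *
 *	installctx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, my_exit, my_free);
 *	...
 *	(void) removectx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, my_exit, my_free);
 *
 * removectx() compares every op pointer and the argument, so the teardown
 * call must pass exactly what was installed.
 */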
949 
950 void
951 savectx(kthread_t *t)
952 {
953 	struct ctxop *ctx;
954 
955 	ASSERT(t == curthread);
956 	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
957 		if (ctx->save_op != NULL)
958 			(ctx->save_op)(ctx->arg);
959 }
960 
961 void
962 restorectx(kthread_t *t)
963 {
964 	struct ctxop *ctx;
965 
966 	ASSERT(t == curthread);
967 	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
968 		if (ctx->restore_op != NULL)
969 			(ctx->restore_op)(ctx->arg);
970 }
971 
972 void
973 forkctx(kthread_t *t, kthread_t *ct)
974 {
975 	struct ctxop *ctx;
976 
977 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
978 		if (ctx->fork_op != NULL)
979 			(ctx->fork_op)(t, ct);
980 }
981 
982 /*
983  * Note that this operator is only invoked via the _lwp_create
984  * system call.  The system may have other reasons to create lwps,
985  * e.g. the agent lwp or the doors unreferenced lwp.
986  */
987 void
988 lwp_createctx(kthread_t *t, kthread_t *ct)
989 {
990 	struct ctxop *ctx;
991 
992 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
993 		if (ctx->lwp_create_op != NULL)
994 			(ctx->lwp_create_op)(t, ct);
995 }
996 
997 /*
998  * exitctx is called from thread_exit() and lwp_exit() to perform any actions
999  * needed when the thread/LWP leaves the processor for the last time. This
1000  * routine is not intended to deal with freeing memory; freectx() is used for
1001  * that purpose during thread_free(). This routine is provided to allow for
1002  * clean-up that can't wait until thread_free().
1003  */
1004 void
1005 exitctx(kthread_t *t)
1006 {
1007 	struct ctxop *ctx;
1008 
1009 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1010 		if (ctx->exit_op != NULL)
1011 			(ctx->exit_op)(t);
1012 }
1013 
1014 /*
1015  * freectx is called from thread_free() and exec() to get
1016  * rid of old thread context ops.
1017  */
1018 void
1019 freectx(kthread_t *t, int isexec)
1020 {
1021 	struct ctxop *ctx;
1022 
1023 	while ((ctx = t->t_ctx) != NULL) {
1024 		t->t_ctx = ctx->next;
1025 		if (ctx->free_op != NULL)
1026 			(ctx->free_op)(ctx->arg, isexec);
1027 		kmem_free(ctx, sizeof (struct ctxop));
1028 	}
1029 }
1030 
1031 /*
1032  * Set the thread running; arrange for it to be swapped in if necessary.
1033  */
1034 void
1035 setrun_locked(kthread_t *t)
1036 {
1037 	ASSERT(THREAD_LOCK_HELD(t));
1038 	if (t->t_state == TS_SLEEP) {
1039 		/*
1040 		 * Take off sleep queue.
1041 		 */
1042 		SOBJ_UNSLEEP(t->t_sobj_ops, t);
1043 	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1044 		/*
1045 		 * Already on dispatcher queue.
1046 		 */
1047 		return;
1048 	} else if (t->t_state == TS_STOPPED) {
1049 		/*
1050 		 * All of the sending of SIGCONT (TS_XSTART) and /proc
1051 		 * (TS_PSTART) and lwp_continue() (TS_CSTART) must have
1052 		 * requested that the thread be run.
1053 		 * Just calling setrun() is not sufficient to set a stopped
1054 		 * thread running.  TS_XSTART is always set if the thread
1055 		 * is not stopped by a jobcontrol stop signal.
1056 		 * TS_PSTART is always set if /proc is not controlling it.
1057 		 * TS_CSTART is always set if lwp_suspend() didn't stop it.
1058 		 * The thread won't be stopped unless one of these
1059 		 * three mechanisms did it.
1060 		 *
1061 		 * These flags must be set before calling setrun_locked(t).
1062 		 * They can't be passed as arguments because the streams
1063 		 * code calls setrun() indirectly and the mechanism for
1064 		 * doing so admits only one argument.  Note that the
1065 		 * thread must be locked in order to change t_schedflags.
1066 		 */
1067 		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1068 			return;
1069 		/*
1070 		 * Process is no longer stopped (a thread is running).
1071 		 */
1072 		t->t_whystop = 0;
1073 		t->t_whatstop = 0;
1074 		/*
1075 		 * Strictly speaking, we do not have to clear these
1076 		 * flags here; they are cleared on entry to stop().
1077 		 * However, they are confusing when doing kernel
1078 		 * debugging or when they are revealed by ps(1).
1079 		 */
1080 		t->t_schedflag &= ~TS_ALLSTART;
1081 		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
1082 		ASSERT(t->t_lockp == &transition_lock);
1083 		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1084 		/*
1085 		 * Let the class put the process on the dispatcher queue.
1086 		 */
1087 		CL_SETRUN(t);
1088 	}
1091 }
1092 
1093 void
1094 setrun(kthread_t *t)
1095 {
1096 	thread_lock(t);
1097 	setrun_locked(t);
1098 	thread_unlock(t);
1099 }
1100 
1101 /*
1102  * Unpin an interrupted thread.
1103  *	When an interrupt occurs, the interrupt is handled on the stack
1104  *	of an interrupt thread, taken from a pool linked to the CPU structure.
1105  *
1106  *	When swtch() is switching away from an interrupt thread because it
1107  *	blocked or was preempted, this routine is called to complete the
1108  *	saving of the interrupted thread state, and returns the interrupted
1109  *	thread pointer so it may be resumed.
1110  *
1111  *	Called by swtch() only at high spl.
1112  */
1113 kthread_t *
1114 thread_unpin()
1115 {
1116 	kthread_t	*t = curthread;	/* current thread */
1117 	kthread_t	*itp;		/* interrupted thread */
1118 	int		i;		/* interrupt level */
1119 	extern int	intr_passivate();
1120 
1121 	ASSERT(t->t_intr != NULL);
1122 
1123 	itp = t->t_intr;		/* interrupted thread */
1124 	t->t_intr = NULL;		/* clear interrupt ptr */
1125 
1126 	/*
1127 	 * Get state from interrupt thread for the one
1128 	 * it interrupted.
1129 	 */
1130 
1131 	i = intr_passivate(t, itp);
1132 
1133 	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1134 		"intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1135 		i, t, t, itp, itp);
1136 
1137 	/*
1138 	 * Dissociate the current thread from the interrupted thread's LWP.
1139 	 */
1140 	t->t_lwp = NULL;
1141 
1142 	/*
1143 	 * Interrupt handlers above the level that spinlocks block must
1144 	 * not block.
1145 	 */
1146 #if DEBUG
1147 	if (i < 0 || i > LOCK_LEVEL)
1148 		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
1149 #endif
1150 
1151 	/*
1152 	 * Compute the CPU's base interrupt level based on the active
1153 	 * interrupts.
1154 	 */
1155 	ASSERT(CPU->cpu_intr_actv & (1 << i));
1156 	set_base_spl();
1157 
1158 	return (itp);
1159 }
1160 
1161 /*
1162  * Create and initialize an interrupt thread.
1164  *	Called at spl7() or better.
1165  */
1166 void
1167 thread_create_intr(struct cpu *cp)
1168 {
1169 	kthread_t *tp;
1170 
1171 	tp = thread_create(NULL, 0,
1172 	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1173 
1174 	/*
1175 	 * Set the thread in the TS_FREE state.  The state will change
1176 	 * to TS_ONPROC only while the interrupt is active.  Think of these
1177 	 * as being on a private free list for the CPU.  Being TS_FREE keeps
1178 	 * inactive interrupt threads out of debugger thread lists.
1179 	 *
1180 	 * We cannot call thread_create with TS_FREE because of the current
1181 	 * checks there for ONPROC.  Fix this when thread_create takes flags.
1182 	 */
1183 	THREAD_FREEINTR(tp, cp);
1184 
1185 	/*
1186 	 * Nobody should ever reference the credentials of an interrupt
1187 	 * thread so make it NULL to catch any such references.
1188 	 */
1189 	tp->t_cred = NULL;
1190 	tp->t_flag |= T_INTR_THREAD;
1191 	tp->t_cpu = cp;
1192 	tp->t_bound_cpu = cp;
1193 	tp->t_disp_queue = cp->cpu_disp;
1194 	tp->t_affinitycnt = 1;
1195 	tp->t_preempt = 1;
1196 
1197 	/*
1198 	 * Don't make a user-requested binding on this thread so that
1199 	 * the processor can be offlined.
1200 	 */
1201 	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
1202 	tp->t_bind_pset = PS_NONE;
1203 
1204 #if defined(__i386) || defined(__amd64)
1205 	tp->t_stk -= STACK_ALIGN;
1206 	*(tp->t_stk) = 0;		/* terminate intr thread stack */
1207 #endif
1208 
1209 	/*
1210 	 * Link onto CPU's interrupt pool.
1211 	 */
1212 	tp->t_link = cp->cpu_intr_thread;
1213 	cp->cpu_intr_thread = tp;
1214 }
1215 
1216 /*
1217  * TSD -- THREAD SPECIFIC DATA
1218  */
1219 static kmutex_t		tsd_mutex;	 /* linked list spin lock */
1220 static uint_t		tsd_nkeys;	 /* size of destructor array */
1221 /* per-key destructor funcs */
1222 static void 		(**tsd_destructor)(void *);
1223 /* list of tsd_thread's */
1224 static struct tsd_thread	*tsd_list;
1225 
1226 /*
1227  * Default destructor
1228  *	Needed because NULL destructor means that the key is unused
1229  */
1230 /* ARGSUSED */
1231 void
1232 tsd_defaultdestructor(void *value)
1233 {}
1234 
1235 /*
1236  * Create a key (index into per thread array)
1237  *	Locks out tsd_create, tsd_destroy, and tsd_exit
1238  *	May allocate memory with lock held
1239  */
1240 void
1241 tsd_create(uint_t *keyp, void (*destructor)(void *))
1242 {
1243 	int	i;
1244 	uint_t	nkeys;
1245 
1246 	/*
1247 	 * if key is allocated, do nothing
1248 	 */
1249 	mutex_enter(&tsd_mutex);
1250 	if (*keyp) {
1251 		mutex_exit(&tsd_mutex);
1252 		return;
1253 	}
1254 	/*
1255 	 * find an unused key
1256 	 */
1257 	if (destructor == NULL)
1258 		destructor = tsd_defaultdestructor;
1259 
1260 	for (i = 0; i < tsd_nkeys; ++i)
1261 		if (tsd_destructor[i] == NULL)
1262 			break;
1263 
1264 	/*
1265 	 * if no unused keys, increase the size of the destructor array
1266 	 */
1267 	if (i == tsd_nkeys) {
1268 		if ((nkeys = (tsd_nkeys << 1)) == 0)
1269 			nkeys = 1;
1270 		tsd_destructor =
1271 		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1272 		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1273 		    (size_t)(nkeys * sizeof (void (*)(void *))));
1274 		tsd_nkeys = nkeys;
1275 	}
1276 
1277 	/*
1278 	 * allocate the next available unused key
1279 	 */
1280 	tsd_destructor[i] = destructor;
1281 	*keyp = i + 1;
1282 	mutex_exit(&tsd_mutex);
1283 }
1284 
1285 /*
1286  * Destroy a key -- this is for unloadable modules
1287  *
1288  * Assumes that the caller is preventing tsd_set and tsd_get
1289  * Locks out tsd_create, tsd_destroy, and tsd_exit
1290  * May free memory with lock held
1291  */
1292 void
1293 tsd_destroy(uint_t *keyp)
1294 {
1295 	uint_t key;
1296 	struct tsd_thread *tsd;
1297 
1298 	/*
1299 	 * protect the key namespace and our destructor lists
1300 	 */
1301 	mutex_enter(&tsd_mutex);
1302 	key = *keyp;
1303 	*keyp = 0;
1304 
1305 	ASSERT(key <= tsd_nkeys);
1306 
1307 	/*
1308 	 * if the key is valid
1309 	 */
1310 	if (key != 0) {
1311 		uint_t k = key - 1;
1312 		/*
1313 		 * for every thread with TSD, call key's destructor
1314 		 */
1315 		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1316 			/*
1317 			 * no TSD for key in this thread
1318 			 */
1319 			if (key > tsd->ts_nkeys)
1320 				continue;
1321 			/*
1322 			 * call destructor for key
1323 			 */
1324 			if (tsd->ts_value[k] && tsd_destructor[k])
1325 				(*tsd_destructor[k])(tsd->ts_value[k]);
1326 			/*
1327 			 * reset value for key
1328 			 */
1329 			tsd->ts_value[k] = NULL;
1330 		}
1331 		/*
1332 		 * actually free the key (NULL destructor == unused)
1333 		 */
1334 		tsd_destructor[k] = NULL;
1335 	}
1336 
1337 	mutex_exit(&tsd_mutex);
1338 }
1339 
1340 /*
1341  * Quickly return the per thread value that was stored with the specified key
1342  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1343  */
1344 void *
1345 tsd_get(uint_t key)
1346 {
1347 	return (tsd_agent_get(curthread, key));
1348 }
1349 
1350 /*
1351  * Set a per thread value indexed with the specified key
1352  */
1353 int
1354 tsd_set(uint_t key, void *value)
1355 {
1356 	return (tsd_agent_set(curthread, key, value));
1357 }
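
/*
 * Illustrative sketch (not part of the original source): typical TSD use
 * by a module.  my_key, my_destructor and my_data are hypothetical:
 *
 *	static uint_t my_key;
 *
 *	tsd_create(&my_key, my_destructor);	(once, e.g. at _init time)
 *
 *	(void) tsd_set(my_key, my_data);	(per thread)
 *	my_data = tsd_get(my_key);
 *
 *	tsd_destroy(&my_key);			(e.g. at _fini time)
 */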
1358 
1359 /*
1360  * Like tsd_get(), except that the agent lwp can get the tsd of
1361  * another thread in the same process (the agent thread only runs when the
1362  * process is completely stopped by /proc), or syslwp is creating a new lwp.
1363  */
1364 void *
1365 tsd_agent_get(kthread_t *t, uint_t key)
1366 {
1367 	struct tsd_thread *tsd = t->t_tsd;
1368 
1369 	ASSERT(t == curthread ||
1370 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1371 
1372 	if (key && tsd != NULL && key <= tsd->ts_nkeys)
1373 		return (tsd->ts_value[key - 1]);
1374 	return (NULL);
1375 }
1376 
1377 /*
1378  * Like tsd_set(), except that the agent lwp can set the tsd of
1379  * another thread in the same process, or syslwp can set the tsd
1380  * of a thread it's in the middle of creating.
1381  *
1382  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1383  * May lock out tsd_destroy (and tsd_create), may allocate memory with
1384  * lock held
1385  */
1386 int
1387 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1388 {
1389 	struct tsd_thread *tsd = t->t_tsd;
1390 
1391 	ASSERT(t == curthread ||
1392 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1393 
1394 	if (key == 0)
1395 		return (EINVAL);
1396 	if (tsd == NULL)
1397 		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1398 	if (key <= tsd->ts_nkeys) {
1399 		tsd->ts_value[key - 1] = value;
1400 		return (0);
1401 	}
1402 
1403 	ASSERT(key <= tsd_nkeys);
1404 
1405 	/*
1406 	 * lock out tsd_destroy()
1407 	 */
1408 	mutex_enter(&tsd_mutex);
1409 	if (tsd->ts_nkeys == 0) {
1410 		/*
1411 		 * Link onto list of threads with TSD
1412 		 */
1413 		if ((tsd->ts_next = tsd_list) != NULL)
1414 			tsd_list->ts_prev = tsd;
1415 		tsd_list = tsd;
1416 	}
1417 
1418 	/*
1419 	 * Allocate thread local storage and set the value for key
1420 	 */
1421 	tsd->ts_value = tsd_realloc(tsd->ts_value,
1422 	    tsd->ts_nkeys * sizeof (void *),
1423 	    key * sizeof (void *));
1424 	tsd->ts_nkeys = key;
1425 	tsd->ts_value[key - 1] = value;
1426 	mutex_exit(&tsd_mutex);
1427 
1428 	return (0);
1429 }
1430 
1431 
1432 /*
1433  * Return the per thread value that was stored with the specified key
1434  *	If necessary, create the key and the value
1435  *	Assumes the caller is protecting *keyp from tsd_destroy
1436  */
1437 void *
1438 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1439 {
1440 	void *value;
1441 	uint_t key = *keyp;
1442 	struct tsd_thread *tsd = curthread->t_tsd;
1443 
1444 	if (tsd == NULL)
1445 		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1446 	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1447 		return (value);
1448 	if (key == 0)
1449 		tsd_create(keyp, destroy);
1450 	(void) tsd_set(*keyp, value = (*allocate)());
1451 
1452 	return (value);
1453 }
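
/*
 * Illustrative sketch (not part of the original source): tsd_getcreate()
 * folds key creation, allocation and lookup into one call.  The names
 * below are hypothetical:
 *
 *	static uint_t my_key;
 *
 *	my_data = tsd_getcreate(&my_key, my_destructor, my_allocate);
 *
 * The first call from a given thread creates the key if needed and stores
 * the value returned by my_allocate(); later calls return that value.
 */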
1454 
1455 /*
1456  * Called from thread_exit() to run the destructor function for each tsd
1457  *	Locks out tsd_create and tsd_destroy
1458  *	Assumes that the destructor *DOES NOT* use tsd
1459  */
1460 void
1461 tsd_exit(void)
1462 {
1463 	int i;
1464 	struct tsd_thread *tsd = curthread->t_tsd;
1465 
1466 	if (tsd == NULL)
1467 		return;
1468 
1469 	if (tsd->ts_nkeys == 0) {
1470 		kmem_free(tsd, sizeof (*tsd));
1471 		curthread->t_tsd = NULL;
1472 		return;
1473 	}
1474 
1475 	/*
1476 	 * lock out tsd_create and tsd_destroy, call
1477 	 * the destructor, and mark the value as destroyed.
1478 	 */
1479 	mutex_enter(&tsd_mutex);
1480 
1481 	for (i = 0; i < tsd->ts_nkeys; i++) {
1482 		if (tsd->ts_value[i] && tsd_destructor[i])
1483 			(*tsd_destructor[i])(tsd->ts_value[i]);
1484 		tsd->ts_value[i] = NULL;
1485 	}
1486 
1487 	/*
1488 	 * remove from linked list of threads with TSD
1489 	 */
1490 	if (tsd->ts_next)
1491 		tsd->ts_next->ts_prev = tsd->ts_prev;
1492 	if (tsd->ts_prev)
1493 		tsd->ts_prev->ts_next = tsd->ts_next;
1494 	if (tsd_list == tsd)
1495 		tsd_list = tsd->ts_next;
1496 
1497 	mutex_exit(&tsd_mutex);
1498 
1499 	/*
1500 	 * free up the TSD
1501 	 */
1502 	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1503 	kmem_free(tsd, sizeof (struct tsd_thread));
1504 	curthread->t_tsd = NULL;
1505 }
1506 
1507 /*
1508  * realloc
1509  */
1510 static void *
1511 tsd_realloc(void *old, size_t osize, size_t nsize)
1512 {
1513 	void *new;
1514 
1515 	new = kmem_zalloc(nsize, KM_SLEEP);
1516 	if (old) {
1517 		bcopy(old, new, osize);
1518 		kmem_free(old, osize);
1519 	}
1520 	return (new);
1521 }
1522 
1523 /*
1524  * Check to see if an interrupt thread might be active at a given ipl.
1525  * If so, return true.
1526  * We must be conservative--it is ok to give a false yes, but a false no
1527  * will cause disaster.  (But if the situation changes after we check, it is
1528  * ok--the caller is trying to ensure that an interrupt routine has been
1529  * exited).
1530  * This is used when trying to remove an interrupt handler from an autovector
1531  * list in avintr.c.
1532  */
1533 int
1534 intr_active(struct cpu *cp, int level)
1535 {
1536 	if (level <= LOCK_LEVEL)
1537 		return (cp->cpu_thread != cp->cpu_dispthread);
1538 	else
1539 		return (CPU_ON_INTR(cp));
1540 }
1541 
1542 /*
1543  * Return non-zero if an interrupt is being serviced.
1544  */
1545 int
1546 servicing_interrupt()
1547 {
1548 	int onintr = 0;
1549 
1550 	/* Are we an interrupt thread */
1551 	if (curthread->t_flag & T_INTR_THREAD)
1552 		return (1);
1553 	/* Are we servicing a high level interrupt? */
1554 	if (CPU_ON_INTR(CPU)) {
1555 		kpreempt_disable();
1556 		onintr = CPU_ON_INTR(CPU);
1557 		kpreempt_enable();
1558 	}
1559 	return (onintr);
1560 }
1561 
1562 
1563 /*
1564  * Change the dispatch priority of a thread in the system.
1565  * Used when raising or lowering a thread's priority.
1566  * (E.g., priority inheritance)
1567  *
1568  * Since threads are queued according to their priority, we
1569  * must check the thread's state to determine whether it
1570  * is on a queue somewhere. If it is, we've got to:
1571  *
1572  *	o Dequeue the thread.
1573  *	o Change its effective priority.
1574  *	o Enqueue the thread.
1575  *
1576  * Assumptions: The thread whose priority we wish to change
1577  * must be locked before we call thread_change_(e)pri().
1578  * The thread_change(e)pri() function doesn't drop the thread
1579  * lock--that must be done by its caller.
1580  */
1581 void
1582 thread_change_epri(kthread_t *t, pri_t disp_pri)
1583 {
1584 	uint_t	state;
1585 
1586 	ASSERT(THREAD_LOCK_HELD(t));
1587 
1588 	/*
1589 	 * If the inherited priority hasn't actually changed,
1590 	 * just return.
1591 	 */
1592 	if (t->t_epri == disp_pri)
1593 		return;
1594 
1595 	state = t->t_state;
1596 
1597 	/*
1598 	 * If it's not on a queue, change the priority with
1599 	 * impunity.
1600 	 */
1601 	if ((state & (TS_SLEEP | TS_RUN)) == 0) {
1602 		t->t_epri = disp_pri;
1603 
1604 		if (state == TS_ONPROC) {
1605 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1606 
1607 			if (t == cp->cpu_dispthread)
1608 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1609 		}
1610 		return;
1611 	}
1612 
1613 	/*
1614 	 * It's either on a sleep queue or a run queue.
1615 	 */
1616 	if (state == TS_SLEEP) {
1617 
1618 		/*
1619 		 * Take the thread out of its sleep queue.
1620 		 * Change the inherited priority.
1621 		 * Re-enqueue the thread.
1622 		 * Each synchronization object exports a function
1623 		 * to do this in an appropriate manner.
1624 		 */
1625 		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1626 	} else {
1627 		/*
1628 		 * The thread is on a run queue.
1629 		 * Note: setbackdq() may not put the thread
1630 		 * back on the same run queue where it originally
1631 		 * resided.
1632 		 */
1633 		(void) dispdeq(t);
1634 		t->t_epri = disp_pri;
1635 		setbackdq(t);
1636 	}
1637 }	/* end of thread_change_epri */
1638 
1639 /*
1640  * Function: Change the t_pri field of a thread.
1641  * Side Effects: Adjust the thread ordering on a run queue
1642  *		 or sleep queue, if necessary.
1643  * Returns: 1 if the thread was on a run queue, else 0.
1644  */
1645 int
1646 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1647 {
1648 	uint_t	state;
1649 	int	on_rq = 0;
1650 
1651 	ASSERT(THREAD_LOCK_HELD(t));
1652 
1653 	state = t->t_state;
1654 	THREAD_WILLCHANGE_PRI(t, disp_pri);
1655 
1656 	/*
1657 	 * If it's not on a queue, change the priority with
1658 	 * impunity.
1659 	 */
1660 	if ((state & (TS_SLEEP | TS_RUN)) == 0) {
1661 		t->t_pri = disp_pri;
1662 
1663 		if (state == TS_ONPROC) {
1664 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1665 
1666 			if (t == cp->cpu_dispthread)
1667 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1668 		}
1669 		return (0);
1670 	}
1671 
1672 	/*
1673 	 * It's either on a sleep queue or a run queue.
1674 	 */
1675 	if (state == TS_SLEEP) {
1676 		/*
1677 		 * If the priority has changed, take the thread out of
1678 		 * its sleep queue and change the priority.
1679 		 * Re-enqueue the thread.
1680 		 * Each synchronization object exports a function
1681 		 * to do this in an appropriate manner.
1682 		 */
1683 		if (disp_pri != t->t_pri)
1684 			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1685 	} else {
1686 		/*
1687 		 * The thread is on a run queue.
1688 		 * Note: setbackdq() may not put the thread
1689 		 * back on the same run queue where it originally
1690 		 * resided.
1691 		 *
1692 		 * We still requeue the thread even if the priority
1693 		 * is unchanged to preserve round-robin (and other)
1694 		 * effects between threads of the same priority.
1695 		 */
1696 		on_rq = dispdeq(t);
1697 		ASSERT(on_rq);
1698 		t->t_pri = disp_pri;
1699 		if (front) {
1700 			setfrontdq(t);
1701 		} else {
1702 			setbackdq(t);
1703 		}
1704 	}
1705 	return (on_rq);
1706 }
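
/*
 * Illustrative sketch (not part of the original source): per the comment
 * block above, the caller holds the thread lock across the priority change
 * and releases it itself.  new_pri is hypothetical:
 *
 *	thread_lock(t);
 *	(void) thread_change_pri(t, new_pri, 0);
 *	thread_unlock(t);
 */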
1707