xref: /illumos-gate/usr/src/uts/common/disp/thread.c (revision a07094369b21309434206d9b3601d162693466fc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/sysmacros.h>
32 #include <sys/signal.h>
33 #include <sys/stack.h>
34 #include <sys/pcb.h>
35 #include <sys/user.h>
36 #include <sys/systm.h>
37 #include <sys/sysinfo.h>
38 #include <sys/var.h>
39 #include <sys/errno.h>
40 #include <sys/cmn_err.h>
41 #include <sys/cred.h>
42 #include <sys/resource.h>
43 #include <sys/task.h>
44 #include <sys/project.h>
45 #include <sys/proc.h>
46 #include <sys/debug.h>
47 #include <sys/inline.h>
48 #include <sys/disp.h>
49 #include <sys/class.h>
50 #include <vm/seg_kmem.h>
51 #include <vm/seg_kp.h>
52 #include <sys/machlock.h>
53 #include <sys/kmem.h>
54 #include <sys/varargs.h>
55 #include <sys/turnstile.h>
56 #include <sys/poll.h>
57 #include <sys/vtrace.h>
58 #include <sys/callb.h>
59 #include <c2/audit.h>
60 #include <sys/tnf.h>
61 #include <sys/sobject.h>
62 #include <sys/cpupart.h>
63 #include <sys/pset.h>
64 #include <sys/door.h>
65 #include <sys/spl.h>
66 #include <sys/copyops.h>
67 #include <sys/rctl.h>
68 #include <sys/pool.h>
69 #include <sys/zone.h>
70 #include <sys/cpc_impl.h>
71 #include <sys/sdt.h>
72 #include <sys/reboot.h>
73 #include <sys/kdi.h>
74 
75 struct kmem_cache *thread_cache;	/* cache of free threads */
76 struct kmem_cache *lwp_cache;		/* cache of free lwps */
77 struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */
78 
79 /*
80  * allthreads is only for use by kmem_readers.  All kernel loops can use
81  * the current thread as a start/end point.
82  */
83 static kthread_t *allthreads = &t0;	/* circular list of all threads */
84 
85 static kcondvar_t reaper_cv;		/* synchronization var */
86 kthread_t	*thread_deathrow;	/* list of reapable threads */
87 kthread_t	*lwp_deathrow;		/* list of reapable threads with lwps */
88 kmutex_t	reaplock;		/* protects lwp and thread deathrows */
89 kmutex_t	thread_free_lock;	/* protects clock from reaper */
90 int	thread_reapcnt = 0;		/* number of threads on deathrow */
91 int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
92 int	reaplimit = 16;			/* delay reaping until reaplimit */
93 
94 extern int nthread;
95 
96 id_t	syscid;				/* system scheduling class ID */
97 void	*segkp_thread;			/* cookie for segkp pool */
98 
99 int lwp_cache_sz = 32;
100 int t_cache_sz = 8;
101 static kt_did_t next_t_id = 1;
102 
103 /*
104  * Min/Max stack sizes for stack size parameters
105  */
106 #define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
107 #define	MIN_STKSIZE	DEFAULTSTKSZ
108 
109 /*
110  * default_stksize overrides lwp_default_stksize if it is set.
111  */
112 int	default_stksize;
113 int	lwp_default_stksize;
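
/*
 * For example, an administrator could raise the default kernel stack
 * size at boot via /etc/system (the value below is illustrative only):
 *
 *	set default_stksize=0x8000
 *
 * thread_init() accepts the override only if it is a multiple of
 * PAGESIZE within [MIN_STKSIZE, MAX_STKSIZE]; otherwise it falls back
 * to DEFAULTSTKSZ.
 */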
114 
115 static zone_key_t zone_thread_key;
116 
117 /*
118  * forward declarations for internal thread specific data (tsd)
119  */
120 static void *tsd_realloc(void *, size_t, size_t);
121 
122 /*ARGSUSED*/
123 static int
124 turnstile_constructor(void *buf, void *cdrarg, int kmflags)
125 {
126 	bzero(buf, sizeof (turnstile_t));
127 	return (0);
128 }
129 
130 /*ARGSUSED*/
131 static void
132 turnstile_destructor(void *buf, void *cdrarg)
133 {
134 	turnstile_t *ts = buf;
135 
136 	ASSERT(ts->ts_free == NULL);
137 	ASSERT(ts->ts_waiters == 0);
138 	ASSERT(ts->ts_inheritor == NULL);
139 	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
140 	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
141 }
142 
143 void
144 thread_init(void)
145 {
146 	kthread_t *tp;
147 	extern char sys_name[];
148 	extern void idle();
149 	struct cpu *cpu = CPU;
150 
151 	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
152 
153 #if defined(__i386) || defined(__amd64)
154 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
155 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
156 
157 	/*
158 	 * "struct _klwp" includes a "struct pcb", which includes a
159 	 * "struct fpu", which needs to be 16-byte aligned on amd64
160 	 * (and even on i386 for fxsave/fxrstor).
161 	 */
162 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
163 	    16, NULL, NULL, NULL, NULL, NULL, 0);
164 #else
165 	/*
166 	 * Allocate thread structures from static_arena.  This prevents
167 	 * issues where a thread tries to relocate its own thread
168 	 * structure and touches it after the mapping has been suspended.
169 	 */
170 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
171 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);
172 
173 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
174 	    0, NULL, NULL, NULL, NULL, NULL, 0);
175 #endif
176 
177 	turnstile_cache = kmem_cache_create("turnstile_cache",
178 	    sizeof (turnstile_t), 0,
179 	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
180 
181 	cred_init();
182 
183 	rctl_init();
184 	project_init();
185 	zone_init();
186 	task_init();
187 	pool_init();
188 
189 	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
190 
191 	/*
192 	 * Originally, we had two parameters to set default stack
193 	 * size: one for lwp's (lwp_default_stksize), and one for
194 	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
195 	 * Now we have a third parameter, default_stksize, that
196 	 * overrides both if it is set to a legal stack size.
197 	 */
198 
199 	if (default_stksize == 0) {
200 		default_stksize = DEFAULTSTKSZ;
201 	} else if (default_stksize % PAGESIZE != 0 ||
202 	    default_stksize > MAX_STKSIZE ||
203 	    default_stksize < MIN_STKSIZE) {
204 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
205 		    (int)DEFAULTSTKSZ);
206 		default_stksize = DEFAULTSTKSZ;
207 	} else {
208 		lwp_default_stksize = default_stksize;
209 	}
210 
211 	if (lwp_default_stksize == 0) {
212 		lwp_default_stksize = default_stksize;
213 	} else if (lwp_default_stksize % PAGESIZE != 0 ||
214 	    lwp_default_stksize > MAX_STKSIZE ||
215 	    lwp_default_stksize < MIN_STKSIZE) {
216 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
217 		    default_stksize);
218 		lwp_default_stksize = default_stksize;
219 	}
220 
221 	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
222 	    lwp_default_stksize,
223 	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));
224 
225 	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
226 	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);
227 
228 	(void) getcid(sys_name, &syscid);
229 	curthread->t_cid = syscid;	/* current thread is t0 */
230 
231 	/*
232 	 * Set up the first CPU's idle thread.
233 	 * It runs whenever the CPU has nothing worthwhile to do.
234 	 */
235 	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
236 	cpu->cpu_idle_thread = tp;
237 	tp->t_preempt = 1;
238 	tp->t_disp_queue = cpu->cpu_disp;
239 	ASSERT(tp->t_disp_queue != NULL);
240 	tp->t_bound_cpu = cpu;
241 	tp->t_affinitycnt = 1;
242 
243 	/*
244 	 * Registering a thread in the callback table is usually
245 	 * done in the initialization code of the thread. In this
246 	 * case, we do it right after thread creation to avoid
247 	 * blocking the idle thread while it registers itself. It also
248 	 * avoids the possibility of reregistration in case a CPU
249 	 * restarts its idle thread.
250 	 */
251 	CALLB_CPR_INIT_SAFE(tp, "idle");
252 
253 	/*
254 	 * Finish initializing the kernel memory allocator now that
255 	 * thread_create() is available.
256 	 */
257 	kmem_thread_init();
258 
259 	if (boothowto & RB_DEBUG)
260 		kdi_dvec_thravail();
261 }
262 
263 /*
264  * Create a thread.
265  *
266  * thread_create() blocks for memory if necessary.  It never fails.
267  *
268  * If stk is NULL, the thread is created at the base of the stack
269  * and cannot be swapped.
270  */
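/*
 * Example (an illustrative sketch, not a consumer in this file): a
 * typical kernel-only service thread passes a NULL stk so that the
 * stack and the kthread_t are carved from a single segkp chunk:
 *
 *	kthread_t *t;
 *
 *	t = thread_create(NULL, 0, my_service, NULL, 0, &p0, TS_RUN,
 *	    minclsyspri);
 *
 * Here my_service is a hypothetical void (*)() worker routine.
 */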
271 kthread_t *
272 thread_create(
273 	caddr_t	stk,
274 	size_t	stksize,
275 	void	(*proc)(),
276 	void	*arg,
277 	size_t	len,
278 	proc_t	 *pp,
279 	int	state,
280 	pri_t	pri)
281 {
282 	kthread_t *t;
283 	extern struct classfuncs sys_classfuncs;
284 	turnstile_t *ts;
285 
286 	/*
287 	 * Every thread keeps a turnstile around in case it needs to block.
288 	 * The only reason the turnstile is not simply part of the thread
289 	 * structure is that we may have to break the association whenever
290 	 * more than one thread blocks on a given synchronization object.
291 	 * From a memory-management standpoint, turnstiles are like the
292 	 * "attached mblks" that hang off dblks in the streams allocator.
293 	 */
294 	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
295 
296 	if (stk == NULL) {
297 		/*
298 		 * alloc both thread and stack in segkp chunk
299 		 */
300 
301 		if (stksize < default_stksize)
302 			stksize = default_stksize;
303 
304 		if (stksize == default_stksize) {
305 			stk = (caddr_t)segkp_cache_get(segkp_thread);
306 		} else {
307 			stksize = roundup(stksize, PAGESIZE);
308 			stk = (caddr_t)segkp_get(segkp, stksize,
309 			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
310 		}
311 
312 		ASSERT(stk != NULL);
313 
314 		/*
315 		 * The machine-dependent mutex code may impose alignment
316 		 * requirements on thread pointers (since they may be used
317 		 * for mutex owner fields).
318 		 * PTR24_ALIGN is the size of the alignment quantum.
319 		 * XXX - assumes stack grows toward low addresses.
320 		 */
321 		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
322 			cmn_err(CE_PANIC, "thread_create: proposed stack size"
323 			    " too small to hold thread.");
324 #ifdef STACK_GROWTH_DOWN
325 		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
326 		stksize &= -PTR24_ALIGN;	/* make thread aligned */
327 		t = (kthread_t *)(stk + stksize);
328 		bzero(t, sizeof (kthread_t));
329 #ifdef	C2_AUDIT
330 		if (audit_active)
331 			audit_thread_create(t);
332 #endif
333 		t->t_stk = stk + stksize;
334 		t->t_stkbase = stk;
335 #else	/* stack grows to larger addresses */
336 		stksize -= SA(sizeof (kthread_t));
337 		t = (kthread_t *)(stk);
338 		bzero(t, sizeof (kthread_t));
339 		t->t_stk = stk + sizeof (kthread_t);
340 		t->t_stkbase = stk + stksize + sizeof (kthread_t);
341 #endif	/* STACK_GROWTH_DOWN */
342 		t->t_flag |= T_TALLOCSTK;
343 		t->t_swap = stk;
344 	} else {
345 		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
346 		bzero(t, sizeof (kthread_t));
347 		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
348 #ifdef	C2_AUDIT
349 		if (audit_active)
350 			audit_thread_create(t);
351 #endif
352 		/*
353 		 * Initialize t_stk to the kernel stack pointer to use
354 		 * upon entry to the kernel
355 		 */
356 #ifdef STACK_GROWTH_DOWN
357 		t->t_stk = stk + stksize;
358 		t->t_stkbase = stk;
359 #else
360 		t->t_stk = stk;			/* 3b2-like */
361 		t->t_stkbase = stk + stksize;
362 #endif /* STACK_GROWTH_DOWN */
363 	}
364 
365 	/* set default stack flag */
366 	if (stksize == lwp_default_stksize)
367 		t->t_flag |= T_DFLTSTK;
368 
369 	t->t_ts = ts;
370 
371 	/*
372 	 * p_cred could be NULL if thread_create() is called before cred_init()
373 	 * is called in main().
374 	 */
375 	mutex_enter(&pp->p_crlock);
376 	if (pp->p_cred)
377 		crhold(t->t_cred = pp->p_cred);
378 	mutex_exit(&pp->p_crlock);
379 	t->t_start = gethrestime_sec();
380 	t->t_startpc = proc;
381 	t->t_procp = pp;
382 	t->t_clfuncs = &sys_classfuncs.thread;
383 	t->t_cid = syscid;
384 	t->t_pri = pri;
385 	t->t_stime = lbolt;
386 	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
387 	t->t_bind_cpu = PBIND_NONE;
388 	t->t_bind_pset = PS_NONE;
389 	t->t_plockp = &pp->p_lock;
390 	t->t_copyops = NULL;
391 	t->t_taskq = NULL;
392 	t->t_anttime = 0;
393 	t->t_hatdepth = 0;
394 
395 	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */
396 
397 	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
398 #ifndef NPROBE
399 	/* Kernel probe */
400 	tnf_thread_create(t);
401 #endif /* NPROBE */
402 	LOCK_INIT_CLEAR(&t->t_lock);
403 
404 	/*
405 	 * Callers who give us a NULL proc must do their own
406 	 * stack initialization, e.g. lwp_create().
407 	 */
408 	if (proc != NULL) {
409 		t->t_stk = thread_stk_init(t->t_stk);
410 		thread_load(t, proc, arg, len);
411 	}
412 
413 	/*
414 	 * Put a hold on project0. If this thread is actually in a
415 	 * different project, then t_proj will be changed later in
416 	 * lwp_create().  All kernel-only threads must be in project 0.
417 	 */
418 	t->t_proj = project_hold(proj0p);
419 
420 	lgrp_affinity_init(&t->t_lgrp_affinity);
421 
422 	mutex_enter(&pidlock);
423 	nthread++;
424 	t->t_did = next_t_id++;
425 	t->t_prev = curthread->t_prev;
426 	t->t_next = curthread;
427 
428 	/*
429 	 * Add the thread to the list of all threads, and initialize
430 	 * its t_cpu pointer.  We need to block preemption since
431 	 * cpu_offline walks the thread list looking for threads
432 	 * with t_cpu pointing to the CPU being offlined.  We want
433 	 * to make sure that the list is consistent and that if t_cpu
434 	 * is set, the thread is on the list.
435 	 */
436 	kpreempt_disable();
437 	curthread->t_prev->t_next = t;
438 	curthread->t_prev = t;
439 
440 	/*
441 	 * Threads should never have a NULL t_cpu pointer so assign it
442 	 * here.  If the thread is being created with state TS_RUN a
443 	 * better CPU may be chosen when it is placed on the run queue.
444 	 *
445 	 * We need to keep kernel preemption disabled when setting all
446 	 * three fields to keep them in sync.  Also, always create in
447 	 * the default partition since that's where kernel threads go
448 	 * (if this isn't a kernel thread, t_cpupart will be changed
449 	 * in lwp_create before setting the thread runnable).
450 	 */
451 	t->t_cpupart = &cp_default;
452 
453 	/*
454 	 * For now, affiliate this thread with the root lgroup.
455 	 * Since the kernel does not (presently) allocate its memory
456 	 * in a locality aware fashion, the root is an appropriate home.
457 	 * If this thread is later associated with an lwp, it will have
458 	 * it's lgroup re-assigned at that time.
459 	 */
460 	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
461 
462 	/*
463 	 * Inherit the current cpu.  If this cpu isn't part of the chosen
464 	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
465 	 * is ready to run.
466 	 */
467 	if (CPU->cpu_part == &cp_default)
468 		t->t_cpu = CPU;
469 	else
470 		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
471 		    t->t_pri, NULL);
472 
473 	t->t_disp_queue = t->t_cpu->cpu_disp;
474 	kpreempt_enable();
475 
476 	/*
477 	 * Initialize thread state and the dispatcher lock pointer.
478 	 * Need to hold onto pidlock to block allthreads walkers until
479 	 * the state is set.
480 	 */
481 	switch (state) {
482 	case TS_RUN:
483 		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
484 		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
485 		CL_SETRUN(t);
486 		thread_unlock(t);
487 		break;
488 
489 	case TS_ONPROC:
490 		THREAD_ONPROC(t, t->t_cpu);
491 		break;
492 
493 	case TS_FREE:
494 		/*
495 		 * Free state will be used for intr threads.
496 		 * The interrupt routine must set the thread dispatcher
497 		 * lock pointer (t_lockp) if starting on a CPU
498 		 * other than the current one.
499 		 */
500 		THREAD_FREEINTR(t, CPU);
501 		break;
502 
503 	case TS_STOPPED:
504 		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
505 		break;
506 
507 	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
508 		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
509 	}
510 	mutex_exit(&pidlock);
511 	return (t);
512 }
513 
514 /*
515  * Move thread to project0 and take care of project reference counters.
516  */
517 void
518 thread_rele(kthread_t *t)
519 {
520 	kproject_t *kpj;
521 
522 	thread_lock(t);
523 
524 	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
525 	kpj = ttoproj(t);
526 	t->t_proj = proj0p;
527 
528 	thread_unlock(t);
529 
530 	if (kpj != proj0p) {
531 		project_rele(kpj);
532 		(void) project_hold(proj0p);
533 	}
534 }
535 
536 
537 void	(*ip_cleanup_func)(void);
538 
539 void
540 thread_exit()
541 {
542 	kthread_t *t = curthread;
543 
544 	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
545 		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
546 
547 	if (ip_cleanup_func != NULL)
548 		(*ip_cleanup_func)();
549 
550 	tsd_exit();		/* Clean up this thread's TSD */
551 
552 	kcpc_passivate();	/* clean up performance counter state */
553 
554 	/*
555 	 * No kernel thread should have called poll() without arranging
556 	 * for pollcleanup() to be called here.
557 	 */
558 	ASSERT(t->t_pollstate == NULL);
559 	ASSERT(t->t_schedctl == NULL);
560 	if (t->t_door)
561 		door_slam();	/* in case thread did an upcall */
562 
563 #ifndef NPROBE
564 	/* Kernel probe */
565 	if (t->t_tnf_tpdp)
566 		tnf_thread_exit();
567 #endif /* NPROBE */
568 
569 	thread_rele(t);
570 	t->t_preempt++;
571 
572 	/*
573 	 * Remove the thread from the all-threads list so that
574 	 * deathrow can use the same pointers.
575 	 */
576 	mutex_enter(&pidlock);
577 	t->t_next->t_prev = t->t_prev;
578 	t->t_prev->t_next = t->t_next;
579 	ASSERT(allthreads != t);	/* t0 never exits */
580 	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
581 	mutex_exit(&pidlock);
582 
583 	if (t->t_ctx != NULL)
584 		exitctx(t);
585 	if (t->t_procp->p_pctx != NULL)
586 		exitpctx(t->t_procp);
587 
588 	t->t_state = TS_ZOMB;	/* set zombie thread */
589 
590 	swtch_from_zombie();	/* give up the CPU */
591 	/* NOTREACHED */
592 }
593 
594 /*
595  * Check to see if the specified thread is active (defined as being on
596  * the thread list).  This is certainly a slow way to do this; if there's
597  * ever a reason to speed it up, we could maintain a hash table of active
598  * threads indexed by their t_did.
599  */
600 static kthread_t *
601 did_to_thread(kt_did_t tid)
602 {
603 	kthread_t *t;
604 
605 	ASSERT(MUTEX_HELD(&pidlock));
606 	for (t = curthread->t_next; t != curthread; t = t->t_next) {
607 		if (t->t_did == tid)
608 			break;
609 	}
610 	if (t->t_did == tid)
611 		return (t);
612 	else
613 		return (NULL);
614 }
615 
616 /*
617  * Wait for specified thread to exit.  Returns immediately if the thread
618  * could not be found, meaning that it has either already exited or never
619  * existed.
620  */
621 void
622 thread_join(kt_did_t tid)
623 {
624 	kthread_t *t;
625 
626 	ASSERT(tid != curthread->t_did);
627 	ASSERT(tid != t0.t_did);
628 
629 	mutex_enter(&pidlock);
630 	/*
631 	 * Make sure we check that the thread is on the thread list
632 	 * before blocking on it; otherwise we could end up blocking on
633 	 * a cv that's already been freed.  In other words, don't cache
634 	 * the thread pointer across calls to cv_wait.
635 	 *
636 	 * The choice of loop invariant means that whenever a thread
637 	 * is taken off the allthreads list, a cv_broadcast must be
638 	 * performed on that thread's t_joincv to wake up any waiters.
639 	 * The broadcast doesn't have to happen right away, but it
640 	 * shouldn't be postponed indefinitely (e.g., by doing it in
641 	 * thread_free which may only be executed when the deathrow
642 	 * queue is processed.
643 	 */
644 	while ((t = did_to_thread(tid)) != NULL)
645 		cv_wait(&t->t_joincv, &pidlock);
646 	mutex_exit(&pidlock);
647 }
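
/*
 * Example (sketch): a caller that must synchronize with a worker's exit
 * records the worker's t_did when the thread is created and joins on
 * that id later; my_worker is hypothetical:
 *
 *	kthread_t *t;
 *	kt_did_t did;
 *
 *	t = thread_create(NULL, 0, my_worker, NULL, 0, &p0, TS_RUN,
 *	    minclsyspri);
 *	did = t->t_did;
 *	...
 *	thread_join(did);
 *
 * Only the kt_did_t is retained; the kthread_t itself must not be
 * dereferenced once the worker may have exited.
 */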
648 
649 void
650 thread_free(kthread_t *t)
651 {
652 	ASSERT(t != &t0 && t->t_state == TS_FREE);
653 	ASSERT(t->t_door == NULL);
654 	ASSERT(t->t_schedctl == NULL);
655 	ASSERT(t->t_pollstate == NULL);
656 
657 	t->t_pri = 0;
658 	t->t_pc = 0;
659 	t->t_sp = 0;
660 	t->t_wchan0 = NULL;
661 	t->t_wchan = NULL;
662 	if (t->t_cred != NULL) {
663 		crfree(t->t_cred);
664 		t->t_cred = NULL;
665 	}
666 	if (t->t_pdmsg) {
667 		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
668 		t->t_pdmsg = NULL;
669 	}
670 #ifdef	C2_AUDIT
671 	if (audit_active)
672 		audit_thread_free(t);
673 #endif
674 #ifndef NPROBE
675 	if (t->t_tnf_tpdp)
676 		tnf_thread_free(t);
677 #endif /* NPROBE */
678 	if (t->t_cldata) {
679 		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
680 	}
681 	if (t->t_rprof != NULL) {
682 		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
683 		t->t_rprof = NULL;
684 	}
685 	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
686 	if (t->t_lwp)
687 		lwp_freeregs(t->t_lwp, 0);
688 	if (t->t_ctx)
689 		freectx(t, 0);
690 	if (t->t_procp->p_pctx)
691 		freepctx(t->t_procp, 0);
692 	t->t_stk = NULL;
693 	if (t->t_lwp)
694 		lwp_stk_fini(t->t_lwp);
695 	lock_clear(&t->t_lock);
696 
697 	if (t->t_ts->ts_waiters > 0)
698 		panic("thread_free: turnstile still active");
699 
700 	kmem_cache_free(turnstile_cache, t->t_ts);
701 
702 	free_afd(&t->t_activefd);
703 
704 	/*
705 	 * Barrier for clock thread.  The clock holds this lock to
706 	 * keep the thread from going away while it's looking at it.
707 	 */
708 	mutex_enter(&thread_free_lock);
709 	mutex_exit(&thread_free_lock);
710 
711 	ASSERT(ttoproj(t) == proj0p);
712 	project_rele(ttoproj(t));
713 
714 	lgrp_affinity_free(&t->t_lgrp_affinity);
715 
716 	/*
717 	 * Free thread struct and its stack.
718 	 */
719 	if (t->t_flag & T_TALLOCSTK) {
720 		/* thread struct is embedded in stack */
721 		segkp_release(segkp, t->t_swap);
722 		mutex_enter(&pidlock);
723 		nthread--;
724 		mutex_exit(&pidlock);
725 	} else {
726 		if (t->t_swap) {
727 			segkp_release(segkp, t->t_swap);
728 			t->t_swap = NULL;
729 		}
730 		if (t->t_lwp) {
731 			kmem_cache_free(lwp_cache, t->t_lwp);
732 			t->t_lwp = NULL;
733 		}
734 		mutex_enter(&pidlock);
735 		nthread--;
736 		mutex_exit(&pidlock);
737 		kmem_cache_free(thread_cache, t);
738 	}
739 }
740 
741 /*
742  * Removes threads associated with the given zone from a deathrow queue.
743  * tp is a pointer to the head of the deathrow queue, and countp is a
744  * pointer to the current deathrow count.  Returns a linked list of
745  * threads removed from the list.
746  */
747 static kthread_t *
748 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
749 {
750 	kthread_t *tmp, *list = NULL;
751 	cred_t *cr;
752 
753 	ASSERT(MUTEX_HELD(&reaplock));
754 	while (*tp != NULL) {
755 		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
756 			tmp = *tp;
757 			*tp = tmp->t_forw;
758 			tmp->t_forw = list;
759 			list = tmp;
760 			(*countp)--;
761 		} else {
762 			tp = &(*tp)->t_forw;
763 		}
764 	}
765 	return (list);
766 }
767 
768 static void
769 thread_reap_list(kthread_t *t)
770 {
771 	kthread_t *next;
772 
773 	while (t != NULL) {
774 		next = t->t_forw;
775 		thread_free(t);
776 		t = next;
777 	}
778 }
779 
780 /* ARGSUSED */
781 static void
782 thread_zone_destroy(zoneid_t zoneid, void *unused)
783 {
784 	kthread_t *t, *l;
785 
786 	mutex_enter(&reaplock);
787 	/*
788 	 * Pull threads and lwps associated with zone off deathrow lists.
789 	 */
790 	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
791 	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
792 	mutex_exit(&reaplock);
793 
794 	/*
795 	 * Reap threads
796 	 */
797 	thread_reap_list(t);
798 
799 	/*
800 	 * Reap lwps
801 	 */
802 	thread_reap_list(l);
803 }
804 
805 /*
806  * Clean up zombie threads that are on deathrow.
807  */
808 void
809 thread_reaper()
810 {
811 	kthread_t *t, *l;
812 	callb_cpr_t cprinfo;
813 
814 	/*
815 	 * Register callback to clean up threads when zone is destroyed.
816 	 */
817 	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
818 
819 	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
820 	for (;;) {
821 		mutex_enter(&reaplock);
822 		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
823 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
824 			cv_wait(&reaper_cv, &reaplock);
825 			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
826 		}
827 		t = thread_deathrow;
828 		l = lwp_deathrow;
829 		thread_deathrow = NULL;
830 		lwp_deathrow = NULL;
831 		thread_reapcnt = 0;
832 		lwp_reapcnt = 0;
833 		mutex_exit(&reaplock);
834 
835 		/*
836 		 * Reap threads
837 		 */
838 		thread_reap_list(t);
839 
840 		/*
841 		 * Reap lwps
842 		 */
843 		thread_reap_list(l);
844 	}
845 }
846 
847 /*
848  * This is called by resume() to put a zombie thread onto deathrow.
849  * The thread's state is changed to TS_FREE to indicate that it is reapable.
850  * This is called from the idle thread so it must not block (just spin).
851  */
852 void
853 reapq_add(kthread_t *t)
854 {
855 	mutex_enter(&reaplock);
856 
857 	/*
858 	 * lwp_deathrow contains only threads with lwp linkage
859 	 * that are of the default stacksize. Anything else goes
860 	 * on thread_deathrow.
861 	 */
862 	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
863 		t->t_forw = lwp_deathrow;
864 		lwp_deathrow = t;
865 		lwp_reapcnt++;
866 	} else {
867 		t->t_forw = thread_deathrow;
868 		thread_deathrow = t;
869 		thread_reapcnt++;
870 	}
871 	if (lwp_reapcnt + thread_reapcnt > reaplimit)
872 		cv_signal(&reaper_cv);	/* wake the reaper */
873 	t->t_state = TS_FREE;
874 	lock_clear(&t->t_lock);
875 	mutex_exit(&reaplock);
876 }
877 
878 /*
879  * Install thread context ops for the current thread.
880  */
881 void
882 installctx(
883 	kthread_t *t,
884 	void	*arg,
885 	void	(*save)(void *),
886 	void	(*restore)(void *),
887 	void	(*fork)(void *, void *),
888 	void	(*lwp_create)(void *, void *),
889 	void	(*exit)(void *),
890 	void	(*free)(void *, int))
891 {
892 	struct ctxop *ctx;
893 
894 	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
895 	ctx->save_op = save;
896 	ctx->restore_op = restore;
897 	ctx->fork_op = fork;
898 	ctx->lwp_create_op = lwp_create;
899 	ctx->exit_op = exit;
900 	ctx->free_op = free;
901 	ctx->arg = arg;
902 	ctx->next = t->t_ctx;
903 	t->t_ctx = ctx;
904 }
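
/*
 * Example (sketch): a subsystem that keeps per-thread hardware state
 * could hang save/restore hooks off the current thread; my_state,
 * my_save, my_restore and my_free are hypothetical:
 *
 *	installctx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, my_free);
 *
 * and later remove them by passing back the identical argument tuple:
 *
 *	(void) removectx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, my_free);
 */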
905 
906 /*
907  * Remove thread context ops from the current thread.
908  * (Or allow the agent thread to remove thread context ops from another
909  * thread in the same, stopped, process)
910  */
911 int
912 removectx(
913 	kthread_t *t,
914 	void	*arg,
915 	void	(*save)(void *),
916 	void	(*restore)(void *),
917 	void	(*fork)(void *, void *),
918 	void	(*lwp_create)(void *, void *),
919 	void	(*exit)(void *),
920 	void	(*free)(void *, int))
921 {
922 	struct ctxop *ctx, *prev_ctx;
923 
924 	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
925 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
926 
927 	prev_ctx = NULL;
928 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
929 		if (ctx->save_op == save && ctx->restore_op == restore &&
930 		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
931 		    ctx->exit_op == exit && ctx->free_op == free &&
932 		    ctx->arg == arg) {
933 			if (prev_ctx)
934 				prev_ctx->next = ctx->next;
935 			else
936 				t->t_ctx = ctx->next;
937 			if (ctx->free_op != NULL)
938 				(ctx->free_op)(ctx->arg, 0);
939 			kmem_free(ctx, sizeof (struct ctxop));
940 			return (1);
941 		}
942 		prev_ctx = ctx;
943 	}
944 	return (0);
945 }
946 
947 void
948 savectx(kthread_t *t)
949 {
950 	struct ctxop *ctx;
951 
952 	ASSERT(t == curthread);
953 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
954 		if (ctx->save_op != NULL)
955 			(ctx->save_op)(ctx->arg);
956 }
957 
958 void
959 restorectx(kthread_t *t)
960 {
961 	struct ctxop *ctx;
962 
963 	ASSERT(t == curthread);
964 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
965 		if (ctx->restore_op != NULL)
966 			(ctx->restore_op)(ctx->arg);
967 }
968 
969 void
970 forkctx(kthread_t *t, kthread_t *ct)
971 {
972 	struct ctxop *ctx;
973 
974 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
975 		if (ctx->fork_op != NULL)
976 			(ctx->fork_op)(t, ct);
977 }
978 
979 /*
980  * Note that this operator is only invoked via the _lwp_create
981  * system call.  The system may have other reasons to create lwps,
982  * e.g., the agent lwp or the doors unreferenced lwp.
983  */
984 void
985 lwp_createctx(kthread_t *t, kthread_t *ct)
986 {
987 	struct ctxop *ctx;
988 
989 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
990 		if (ctx->lwp_create_op != NULL)
991 			(ctx->lwp_create_op)(t, ct);
992 }
993 
994 /*
995  * exitctx is called from thread_exit() and lwp_exit() to perform any actions
996  * needed when the thread/LWP leaves the processor for the last time. This
997  * routine is not intended to deal with freeing memory; freectx() is used for
998  * that purpose during thread_free(). This routine is provided to allow for
999  * clean-up that can't wait until thread_free().
1000  */
1001 void
1002 exitctx(kthread_t *t)
1003 {
1004 	struct ctxop *ctx;
1005 
1006 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1007 		if (ctx->exit_op != NULL)
1008 			(ctx->exit_op)(t);
1009 }
1010 
1011 /*
1012  * freectx is called from thread_free() and exec() to get
1013  * rid of old thread context ops.
1014  */
1015 void
1016 freectx(kthread_t *t, int isexec)
1017 {
1018 	struct ctxop *ctx;
1019 
1020 	while ((ctx = t->t_ctx) != NULL) {
1021 		t->t_ctx = ctx->next;
1022 		if (ctx->free_op != NULL)
1023 			(ctx->free_op)(ctx->arg, isexec);
1024 		kmem_free(ctx, sizeof (struct ctxop));
1025 	}
1026 }
1027 
1028 /*
1029  * Set the thread running; arrange for it to be swapped in if necessary.
1030  */
1031 void
1032 setrun_locked(kthread_t *t)
1033 {
1034 	ASSERT(THREAD_LOCK_HELD(t));
1035 	if (t->t_state == TS_SLEEP) {
1036 		/*
1037 		 * Take off sleep queue.
1038 		 */
1039 		SOBJ_UNSLEEP(t->t_sobj_ops, t);
1040 	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1041 		/*
1042 		 * Already on dispatcher queue.
1043 		 */
1044 		return;
1045 	} else if (t->t_state == TS_STOPPED) {
1046 		/*
1047 		 * All of the sending of SIGCONT (TC_XSTART) and /proc
1048 		 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
1049 		 * requested that the thread be run.
1050 		 * Just calling setrun() is not sufficient to set a stopped
1051 		 * thread running.  TP_TXSTART is always set if the thread
1052 		 * is not stopped by a jobcontrol stop signal.
1053 		 * TP_TPSTART is always set if /proc is not controlling it.
1054 		 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
1055 		 * The thread won't be stopped unless one of these
1056 		 * three mechanisms did it.
1057 		 *
1058 		 * These flags must be set before calling setrun_locked(t).
1059 		 * They can't be passed as arguments because the streams
1060 		 * code calls setrun() indirectly and the mechanism for
1061 		 * doing so admits only one argument.  Note that the
1062 		 * thread must be locked in order to change t_schedflags.
1063 		 */
1064 		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1065 			return;
1066 		/*
1067 		 * Process is no longer stopped (a thread is running).
1068 		 */
1069 		t->t_whystop = 0;
1070 		t->t_whatstop = 0;
1071 		/*
1072 		 * Strictly speaking, we do not have to clear these
1073 		 * flags here; they are cleared on entry to stop().
1074 		 * However, they are confusing when doing kernel
1075 		 * debugging or when they are revealed by ps(1).
1076 		 */
1077 		t->t_schedflag &= ~TS_ALLSTART;
1078 		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
1079 		ASSERT(t->t_lockp == &transition_lock);
1080 		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1081 		/*
1082 		 * Let the class put the process on the dispatcher queue.
1083 		 */
1084 		CL_SETRUN(t);
1085 	}
1088 }
1089 
1090 void
1091 setrun(kthread_t *t)
1092 {
1093 	thread_lock(t);
1094 	setrun_locked(t);
1095 	thread_unlock(t);
1096 }
1097 
1098 /*
1099  * Unpin an interrupted thread.
1100  *	When an interrupt occurs, the interrupt is handled on the stack
1101  *	of an interrupt thread, taken from a pool linked to the CPU structure.
1102  *
1103  *	When swtch() is switching away from an interrupt thread because it
1104  *	blocked or was preempted, this routine is called to complete the
1105  *	saving of the interrupted thread state, and returns the interrupted
1106  *	thread pointer so it may be resumed.
1107  *
1108  *	Called by swtch() only at high spl.
1109  */
1110 kthread_t *
1111 thread_unpin()
1112 {
1113 	kthread_t	*t = curthread;	/* current thread */
1114 	kthread_t	*itp;		/* interrupted thread */
1115 	int		i;		/* interrupt level */
1116 	extern int	intr_passivate();
1117 
1118 	ASSERT(t->t_intr != NULL);
1119 
1120 	itp = t->t_intr;		/* interrupted thread */
1121 	t->t_intr = NULL;		/* clear interrupt ptr */
1122 
1123 	/*
1124 	 * Get state from interrupt thread for the one
1125 	 * it interrupted.
1126 	 */
1127 
1128 	i = intr_passivate(t, itp);
1129 
1130 	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1131 		"intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1132 		i, t, t, itp, itp);
1133 
1134 	/*
1135 	 * Dissociate the current thread from the interrupted thread's LWP.
1136 	 */
1137 	t->t_lwp = NULL;
1138 
1139 	/*
1140 	 * Interrupt handlers above LOCK_LEVEL (the highest level at
1141 	 * which spinlocks block) must not block.
1142 	 */
1143 #ifdef DEBUG
1144 	if (i < 0 || i > LOCK_LEVEL)
1145 		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
1146 #endif
1147 
1148 	/*
1149 	 * Compute the CPU's base interrupt level based on the active
1150 	 * interrupts.
1151 	 */
1152 	ASSERT(CPU->cpu_intr_actv & (1 << i));
1153 	set_base_spl();
1154 
1155 	return (itp);
1156 }
1157 
1158 /*
1159  * Create and initialize an interrupt thread.
1160  *	Cannot fail, since thread_create() never fails.
1161  *	Called at spl7() or better.
1162  */
1163 void
1164 thread_create_intr(struct cpu *cp)
1165 {
1166 	kthread_t *tp;
1167 
1168 	tp = thread_create(NULL, 0,
1169 	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1170 
1171 	/*
1172 	 * Set the thread in the TS_FREE state.  The state will change
1173 	 * to TS_ONPROC only while the interrupt is active.  Think of these
1174 	 * as being on a private free list for the CPU.  Being TS_FREE keeps
1175 	 * inactive interrupt threads out of debugger thread lists.
1176 	 *
1177 	 * We cannot call thread_create with TS_FREE because of the current
1178 	 * checks there for ONPROC.  Fix this when thread_create takes flags.
1179 	 */
1180 	THREAD_FREEINTR(tp, cp);
1181 
1182 	/*
1183 	 * Nobody should ever reference the credentials of an interrupt
1184 	 * thread so make it NULL to catch any such references.
1185 	 */
1186 	tp->t_cred = NULL;
1187 	tp->t_flag |= T_INTR_THREAD;
1188 	tp->t_cpu = cp;
1189 	tp->t_bound_cpu = cp;
1190 	tp->t_disp_queue = cp->cpu_disp;
1191 	tp->t_affinitycnt = 1;
1192 	tp->t_preempt = 1;
1193 
1194 	/*
1195 	 * Don't make a user-requested binding on this thread so that
1196 	 * the processor can be offlined.
1197 	 */
1198 	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
1199 	tp->t_bind_pset = PS_NONE;
1200 
1201 #if defined(__i386) || defined(__amd64)
1202 	tp->t_stk -= STACK_ALIGN;
1203 	*(tp->t_stk) = 0;		/* terminate intr thread stack */
1204 #endif
1205 
1206 	/*
1207 	 * Link onto CPU's interrupt pool.
1208 	 */
1209 	tp->t_link = cp->cpu_intr_thread;
1210 	cp->cpu_intr_thread = tp;
1211 }
1212 
1213 /*
1214  * TSD -- THREAD SPECIFIC DATA
1215  */
1216 static kmutex_t		tsd_mutex;	 /* linked list spin lock */
1217 static uint_t		tsd_nkeys;	 /* size of destructor array */
1218 /* per-key destructor funcs */
1219 static void 		(**tsd_destructor)(void *);
1220 /* list of tsd_thread's */
1221 static struct tsd_thread	*tsd_list;
1222 
1223 /*
1224  * Default destructor
1225  *	Needed because NULL destructor means that the key is unused
1226  */
1227 /* ARGSUSED */
1228 void
1229 tsd_defaultdestructor(void *value)
1230 {}
1231 
1232 /*
1233  * Create a key (index into per thread array)
1234  *	Locks out tsd_create, tsd_destroy, and tsd_exit
1235  *	May allocate memory with lock held
1236  */
1237 void
1238 tsd_create(uint_t *keyp, void (*destructor)(void *))
1239 {
1240 	int	i;
1241 	uint_t	nkeys;
1242 
1243 	/*
1244 	 * if key is allocated, do nothing
1245 	 */
1246 	mutex_enter(&tsd_mutex);
1247 	if (*keyp) {
1248 		mutex_exit(&tsd_mutex);
1249 		return;
1250 	}
1251 	/*
1252 	 * find an unused key
1253 	 */
1254 	if (destructor == NULL)
1255 		destructor = tsd_defaultdestructor;
1256 
1257 	for (i = 0; i < tsd_nkeys; ++i)
1258 		if (tsd_destructor[i] == NULL)
1259 			break;
1260 
1261 	/*
1262 	 * if no unused keys, increase the size of the destructor array
1263 	 */
1264 	if (i == tsd_nkeys) {
1265 		if ((nkeys = (tsd_nkeys << 1)) == 0)
1266 			nkeys = 1;
1267 		tsd_destructor =
1268 		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1269 		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1270 		    (size_t)(nkeys * sizeof (void (*)(void *))));
1271 		tsd_nkeys = nkeys;
1272 	}
1273 
1274 	/*
1275 	 * allocate the next available unused key
1276 	 */
1277 	tsd_destructor[i] = destructor;
1278 	*keyp = i + 1;
1279 	mutex_exit(&tsd_mutex);
1280 }
1281 
1282 /*
1283  * Destroy a key -- this is for unloadable modules
1284  *
1285  * Assumes that the caller is preventing tsd_set and tsd_get
1286  * Locks out tsd_create, tsd_destroy, and tsd_exit
1287  * May free memory with lock held
1288  */
1289 void
1290 tsd_destroy(uint_t *keyp)
1291 {
1292 	uint_t key;
1293 	struct tsd_thread *tsd;
1294 
1295 	/*
1296 	 * protect the key namespace and our destructor lists
1297 	 */
1298 	mutex_enter(&tsd_mutex);
1299 	key = *keyp;
1300 	*keyp = 0;
1301 
1302 	ASSERT(key <= tsd_nkeys);
1303 
1304 	/*
1305 	 * if the key is valid
1306 	 */
1307 	if (key != 0) {
1308 		uint_t k = key - 1;
1309 		/*
1310 		 * for every thread with TSD, call key's destructor
1311 		 */
1312 		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1313 			/*
1314 			 * no TSD for key in this thread
1315 			 */
1316 			if (key > tsd->ts_nkeys)
1317 				continue;
1318 			/*
1319 			 * call destructor for key
1320 			 */
1321 			if (tsd->ts_value[k] && tsd_destructor[k])
1322 				(*tsd_destructor[k])(tsd->ts_value[k]);
1323 			/*
1324 			 * reset value for key
1325 			 */
1326 			tsd->ts_value[k] = NULL;
1327 		}
1328 		/*
1329 		 * actually free the key (NULL destructor == unused)
1330 		 */
1331 		tsd_destructor[k] = NULL;
1332 	}
1333 
1334 	mutex_exit(&tsd_mutex);
1335 }
1336 
1337 /*
1338  * Quickly return the per thread value that was stored with the specified key
1339  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1340  */
1341 void *
1342 tsd_get(uint_t key)
1343 {
1344 	return (tsd_agent_get(curthread, key));
1345 }
1346 
1347 /*
1348  * Set a per thread value indexed with the specified key
1349  */
1350 int
1351 tsd_set(uint_t key, void *value)
1352 {
1353 	return (tsd_agent_set(curthread, key, value));
1354 }
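
/*
 * Example (sketch): basic key lifecycle, with a hypothetical
 * my_destructor called for each thread's value at thread exit (or at
 * tsd_destroy):
 *
 *	static uint_t my_key;
 *
 *	tsd_create(&my_key, my_destructor);
 *	(void) tsd_set(my_key, value);
 *	value = tsd_get(my_key);
 *	...
 *	tsd_destroy(&my_key);		(typically at module unload)
 */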
1355 
1356 /*
1357  * Like tsd_get(), except that the agent lwp can get the tsd of
1358  * another thread in the same process (the agent thread only runs when the
1359  * process is completely stopped by /proc), or when syslwp is creating a new lwp.
1360  */
1361 void *
1362 tsd_agent_get(kthread_t *t, uint_t key)
1363 {
1364 	struct tsd_thread *tsd = t->t_tsd;
1365 
1366 	ASSERT(t == curthread ||
1367 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1368 
1369 	if (key && tsd != NULL && key <= tsd->ts_nkeys)
1370 		return (tsd->ts_value[key - 1]);
1371 	return (NULL);
1372 }
1373 
1374 /*
1375  * Like tsd_set(), except that the agent lwp can set the tsd of
1376  * another thread in the same process, or syslwp can set the tsd
1377  * of a thread it's in the middle of creating.
1378  *
1379  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1380  * May lock out tsd_destroy (and tsd_create), may allocate memory with
1381  * lock held
1382  */
1383 int
1384 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1385 {
1386 	struct tsd_thread *tsd = t->t_tsd;
1387 
1388 	ASSERT(t == curthread ||
1389 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1390 
1391 	if (key == 0)
1392 		return (EINVAL);
1393 	if (tsd == NULL)
1394 		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1395 	if (key <= tsd->ts_nkeys) {
1396 		tsd->ts_value[key - 1] = value;
1397 		return (0);
1398 	}
1399 
1400 	ASSERT(key <= tsd_nkeys);
1401 
1402 	/*
1403 	 * lock out tsd_destroy()
1404 	 */
1405 	mutex_enter(&tsd_mutex);
1406 	if (tsd->ts_nkeys == 0) {
1407 		/*
1408 		 * Link onto list of threads with TSD
1409 		 */
1410 		if ((tsd->ts_next = tsd_list) != NULL)
1411 			tsd_list->ts_prev = tsd;
1412 		tsd_list = tsd;
1413 	}
1414 
1415 	/*
1416 	 * Allocate thread local storage and set the value for key
1417 	 */
1418 	tsd->ts_value = tsd_realloc(tsd->ts_value,
1419 	    tsd->ts_nkeys * sizeof (void *),
1420 	    key * sizeof (void *));
1421 	tsd->ts_nkeys = key;
1422 	tsd->ts_value[key - 1] = value;
1423 	mutex_exit(&tsd_mutex);
1424 
1425 	return (0);
1426 }
1427 
1428 
1429 /*
1430  * Return the per thread value that was stored with the specified key
1431  *	If necessary, create the key and the value
1432  *	Assumes the caller is protecting *keyp from tsd_destroy
1433  */
1434 void *
1435 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1436 {
1437 	void *value;
1438 	uint_t key = *keyp;
1439 	struct tsd_thread *tsd = curthread->t_tsd;
1440 
1441 	if (tsd == NULL)
1442 		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1443 	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1444 		return (value);
1445 	if (key == 0)
1446 		tsd_create(keyp, destroy);
1447 	(void) tsd_set(*keyp, value = (*allocate)());
1448 
1449 	return (value);
1450 }
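
/*
 * Example (sketch): a module that caches a per-thread scratch buffer
 * can let tsd_getcreate() do the key creation and allocation on first
 * use; my_key, my_alloc and my_free are hypothetical:
 *
 *	static uint_t my_key;
 *	void *buf;
 *
 *	buf = tsd_getcreate(&my_key, my_free, my_alloc);
 *
 * The first call from a given thread creates the key (if needed) and
 * allocates the value; subsequent calls return the cached value.
 */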
1451 
1452 /*
1453  * Called from thread_exit() to run the destructor function for each tsd
1454  *	Locks out tsd_create and tsd_destroy
1455  *	Assumes that the destructor *DOES NOT* use tsd
1456  */
1457 void
1458 tsd_exit(void)
1459 {
1460 	int i;
1461 	struct tsd_thread *tsd = curthread->t_tsd;
1462 
1463 	if (tsd == NULL)
1464 		return;
1465 
1466 	if (tsd->ts_nkeys == 0) {
1467 		kmem_free(tsd, sizeof (*tsd));
1468 		curthread->t_tsd = NULL;
1469 		return;
1470 	}
1471 
1472 	/*
1473 	 * lock out tsd_create and tsd_destroy, call
1474 	 * the destructor, and mark the value as destroyed.
1475 	 */
1476 	mutex_enter(&tsd_mutex);
1477 
1478 	for (i = 0; i < tsd->ts_nkeys; i++) {
1479 		if (tsd->ts_value[i] && tsd_destructor[i])
1480 			(*tsd_destructor[i])(tsd->ts_value[i]);
1481 		tsd->ts_value[i] = NULL;
1482 	}
1483 
1484 	/*
1485 	 * remove from linked list of threads with TSD
1486 	 */
1487 	if (tsd->ts_next)
1488 		tsd->ts_next->ts_prev = tsd->ts_prev;
1489 	if (tsd->ts_prev)
1490 		tsd->ts_prev->ts_next = tsd->ts_next;
1491 	if (tsd_list == tsd)
1492 		tsd_list = tsd->ts_next;
1493 
1494 	mutex_exit(&tsd_mutex);
1495 
1496 	/*
1497 	 * free up the TSD
1498 	 */
1499 	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1500 	kmem_free(tsd, sizeof (struct tsd_thread));
1501 	curthread->t_tsd = NULL;
1502 }
1503 
1504 /*
1505  * realloc
1506  */
1507 static void *
1508 tsd_realloc(void *old, size_t osize, size_t nsize)
1509 {
1510 	void *new;
1511 
1512 	new = kmem_zalloc(nsize, KM_SLEEP);
1513 	if (old) {
1514 		bcopy(old, new, osize);
1515 		kmem_free(old, osize);
1516 	}
1517 	return (new);
1518 }
1519 
1520 /*
1521  * Check to see if an interrupt thread might be active at a given ipl.
1522  * If so return true.
1523  * We must be conservative--it is ok to give a false yes, but a false no
1524  * will cause disaster.  (But if the situation changes after we check it is
1525  * ok--the caller is trying to ensure that an interrupt routine has been
1526  * exited).
1527  * This is used when trying to remove an interrupt handler from an autovector
1528  * list in avintr.c.
1529  */
1530 int
1531 intr_active(struct cpu *cp, int level)
1532 {
1533 	if (level <= LOCK_LEVEL)
1534 		return (cp->cpu_thread != cp->cpu_dispthread);
1535 	else
1536 		return (CPU_ON_INTR(cp));
1537 }
1538 
1539 /*
1540  * Return non-zero if an interrupt is being serviced.
1541  */
1542 int
1543 servicing_interrupt()
1544 {
1545 	/*
1546 	 * Note: single-OR used on purpose to return non-zero if T_INTR_THREAD
1547 	 * flag set or CPU_ON_INTR(CPU) is non-zero (indicating high-level
1548 	 * interrupt).
1549 	 */
1550 	return ((curthread->t_flag & T_INTR_THREAD) | CPU_ON_INTR(CPU));
1551 }
1552 
1553 
1554 /*
1555  * Change the dispatch priority of a thread in the system.
1556  * Used when raising or lowering a thread's priority.
1557  * (E.g., priority inheritance)
1558  *
1559  * Since threads are queued according to their priority, we
1560  * must check the thread's state to determine whether it
1561  * is on a queue somewhere. If it is, we've got to:
1562  *
1563  *	o Dequeue the thread.
1564  *	o Change its effective priority.
1565  *	o Enqueue the thread.
1566  *
1567  * Assumptions: The thread whose priority we wish to change
1568  * must be locked before we call thread_change_(e)pri().
1569  * The thread_change(e)pri() function doesn't drop the thread
1570  * lock--that must be done by its caller.
1571  */
1572 void
1573 thread_change_epri(kthread_t *t, pri_t disp_pri)
1574 {
1575 	uint_t	state;
1576 
1577 	ASSERT(THREAD_LOCK_HELD(t));
1578 
1579 	/*
1580 	 * If the inherited priority hasn't actually changed,
1581 	 * just return.
1582 	 */
1583 	if (t->t_epri == disp_pri)
1584 		return;
1585 
1586 	state = t->t_state;
1587 
1588 	/*
1589 	 * If it's not on a queue, change the priority with
1590 	 * impunity.
1591 	 */
1592 	if ((state & (TS_SLEEP | TS_RUN)) == 0) {
1593 		t->t_epri = disp_pri;
1594 
1595 		if (state == TS_ONPROC) {
1596 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1597 
1598 			if (t == cp->cpu_dispthread)
1599 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1600 		}
1601 		return;
1602 	}
1603 
1604 	/*
1605 	 * It's either on a sleep queue or a run queue.
1606 	 */
1607 	if (state == TS_SLEEP) {
1608 
1609 		/*
1610 		 * Take the thread out of its sleep queue.
1611 		 * Change the inherited priority.
1612 		 * Re-enqueue the thread.
1613 		 * Each synchronization object exports a function
1614 		 * to do this in an appropriate manner.
1615 		 */
1616 		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1617 	} else {
1618 		/*
1619 		 * The thread is on a run queue.
1620 		 * Note: setbackdq() may not put the thread
1621 		 * back on the same run queue where it originally
1622 		 * resided.
1623 		 */
1624 		(void) dispdeq(t);
1625 		t->t_epri = disp_pri;
1626 		setbackdq(t);
1627 	}
1628 }	/* end of thread_change_epri */
1629 
1630 /*
1631  * Function: Change the t_pri field of a thread.
1632  * Side Effects: Adjust the thread ordering on a run queue
1633  *		 or sleep queue, if necessary.
1634  * Returns: 1 if the thread was on a run queue, else 0.
1635  */
1636 int
1637 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1638 {
1639 	uint_t	state;
1640 	int	on_rq = 0;
1641 
1642 	ASSERT(THREAD_LOCK_HELD(t));
1643 
1644 	state = t->t_state;
1645 	THREAD_WILLCHANGE_PRI(t, disp_pri);
1646 
1647 	/*
1648 	 * If it's not on a queue, change the priority with
1649 	 * impunity.
1650 	 */
1651 	if ((state & (TS_SLEEP | TS_RUN)) == 0) {
1652 		t->t_pri = disp_pri;
1653 
1654 		if (state == TS_ONPROC) {
1655 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1656 
1657 			if (t == cp->cpu_dispthread)
1658 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1659 		}
1660 		return (0);
1661 	}
1662 
1663 	/*
1664 	 * It's either on a sleep queue or a run queue.
1665 	 */
1666 	if (state == TS_SLEEP) {
1667 		/*
1668 		 * If the priority has changed, take the thread out of
1669 		 * its sleep queue and change the priority.
1670 		 * Re-enqueue the thread.
1671 		 * Each synchronization object exports a function
1672 		 * to do this in an appropriate manner.
1673 		 */
1674 		if (disp_pri != t->t_pri)
1675 			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1676 	} else {
1677 		/*
1678 		 * The thread is on a run queue.
1679 		 * Note: setbackdq() may not put the thread
1680 		 * back on the same run queue where it originally
1681 		 * resided.
1682 		 *
1683 		 * We still requeue the thread even if the priority
1684 		 * is unchanged to preserve round-robin (and other)
1685 		 * effects between threads of the same priority.
1686 		 */
1687 		on_rq = dispdeq(t);
1688 		ASSERT(on_rq);
1689 		t->t_pri = disp_pri;
1690 		if (front) {
1691 			setfrontdq(t);
1692 		} else {
1693 			setbackdq(t);
1694 		}
1695 	}
1696 	return (on_rq);
1697 }
1698