xref: /titanic_52/usr/src/uts/common/disp/thread.c (revision b6c3f7863936abeae522e48a13887dddeb691a45)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/sysmacros.h>
31 #include <sys/signal.h>
32 #include <sys/stack.h>
33 #include <sys/pcb.h>
34 #include <sys/user.h>
35 #include <sys/systm.h>
36 #include <sys/sysinfo.h>
37 #include <sys/errno.h>
38 #include <sys/cmn_err.h>
39 #include <sys/cred.h>
40 #include <sys/resource.h>
41 #include <sys/task.h>
42 #include <sys/project.h>
43 #include <sys/proc.h>
44 #include <sys/debug.h>
45 #include <sys/disp.h>
46 #include <sys/class.h>
47 #include <vm/seg_kmem.h>
48 #include <vm/seg_kp.h>
49 #include <sys/machlock.h>
50 #include <sys/kmem.h>
51 #include <sys/varargs.h>
52 #include <sys/turnstile.h>
53 #include <sys/poll.h>
54 #include <sys/vtrace.h>
55 #include <sys/callb.h>
56 #include <c2/audit.h>
57 #include <sys/tnf.h>
58 #include <sys/sobject.h>
59 #include <sys/cpupart.h>
60 #include <sys/pset.h>
61 #include <sys/door.h>
62 #include <sys/spl.h>
63 #include <sys/copyops.h>
64 #include <sys/rctl.h>
65 #include <sys/brand.h>
66 #include <sys/pool.h>
67 #include <sys/zone.h>
68 #include <sys/tsol/label.h>
69 #include <sys/tsol/tndb.h>
70 #include <sys/cpc_impl.h>
71 #include <sys/sdt.h>
72 #include <sys/reboot.h>
73 #include <sys/kdi.h>
74 #include <sys/waitq.h>
75 #include <sys/cpucaps.h>
76 #include <sys/kiconv.h>
77 
78 struct kmem_cache *thread_cache;	/* cache of free threads */
79 struct kmem_cache *lwp_cache;		/* cache of free lwps */
80 struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */
81 
82 /*
83  * allthreads is only for use by kmem_readers.  All kernel loops can use
84  * the current thread as a start/end point.
85  */
86 static kthread_t *allthreads = &t0;	/* circular list of all threads */
87 
88 static kcondvar_t reaper_cv;		/* synchronization var */
89 kthread_t	*thread_deathrow;	/* list of reapable threads */
90 kthread_t	*lwp_deathrow;		/* list of reapable threads */
91 kmutex_t	reaplock;		/* protects lwp and thread deathrows */
92 int	thread_reapcnt = 0;		/* number of threads on deathrow */
93 int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
94 int	reaplimit = 16;			/* delay reaping until reaplimit */
95 
96 thread_free_lock_t	*thread_free_lock;
97 					/* protects tick thread from reaper */
98 
99 extern int nthread;
100 
101 id_t	syscid;				/* system scheduling class ID */
102 void	*segkp_thread;			/* cookie for segkp pool */
103 
104 int lwp_cache_sz = 32;
105 int t_cache_sz = 8;
106 static kt_did_t next_t_id = 1;
107 
108 /*
109  * Min/Max stack sizes for stack size parameters
110  */
111 #define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
112 #define	MIN_STKSIZE	DEFAULTSTKSZ
113 
114 /*
115  * default_stksize overrides lwp_default_stksize if it is set.
116  */
117 int	default_stksize;
118 int	lwp_default_stksize;
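
/*
 * As a rough illustration (the values are hypothetical), both can be
 * tuned from /etc/system, e.g.:
 *
 *	set default_stksize = 0x8000
 *	set lwp_default_stksize = 0x8000
 *
 * A value must be a multiple of PAGESIZE and lie within
 * [MIN_STKSIZE, MAX_STKSIZE]; anything else is rejected in
 * thread_init() below and replaced with the default.
 */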
119 
120 static zone_key_t zone_thread_key;
121 
122 /*
123  * forward declarations for internal thread specific data (tsd)
124  */
125 static void *tsd_realloc(void *, size_t, size_t);
126 
127 void thread_reaper(void);
128 
129 /*ARGSUSED*/
130 static int
131 turnstile_constructor(void *buf, void *cdrarg, int kmflags)
132 {
133 	bzero(buf, sizeof (turnstile_t));
134 	return (0);
135 }
136 
137 /*ARGSUSED*/
138 static void
139 turnstile_destructor(void *buf, void *cdrarg)
140 {
141 	turnstile_t *ts = buf;
142 
143 	ASSERT(ts->ts_free == NULL);
144 	ASSERT(ts->ts_waiters == 0);
145 	ASSERT(ts->ts_inheritor == NULL);
146 	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
147 	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
148 }
149 
150 void
151 thread_init(void)
152 {
153 	kthread_t *tp;
154 	extern char sys_name[];
155 	extern void idle();
156 	struct cpu *cpu = CPU;
157 	int i;
158 	kmutex_t *lp;
159 
160 	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
161 	thread_free_lock =
162 	    kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
163 	for (i = 0; i < THREAD_FREE_NUM; i++) {
164 		lp = &thread_free_lock[i].tf_lock;
165 		mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
166 	}
167 
168 #if defined(__i386) || defined(__amd64)
169 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
170 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);
171 
172 	/*
173 	 * "struct _klwp" includes a "struct pcb", which includes a
174 	 * "struct fpu", which needs to be 16-byte aligned on amd64
175 	 * (and even on i386 for fxsave/fxrstor).
176 	 */
177 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
178 	    16, NULL, NULL, NULL, NULL, NULL, 0);
179 #else
180 	/*
181 	 * Allocate thread structures from static_arena.  This prevents
182 	 * issues where a thread tries to relocate its own thread
183 	 * structure and touches it after the mapping has been suspended.
184 	 */
185 	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
186 	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);
187 
188 	lwp_stk_cache_init();
189 
190 	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
191 	    0, NULL, NULL, NULL, NULL, NULL, 0);
192 #endif
193 
194 	turnstile_cache = kmem_cache_create("turnstile_cache",
195 	    sizeof (turnstile_t), 0,
196 	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);
197 
198 	label_init();
199 	cred_init();
200 
201 	/*
202 	 * Initialize various resource management facilities.
203 	 */
204 	rctl_init();
205 	cpucaps_init();
206 	/*
207 	 * zone_init() should be called before project_init() so that the
208 	 * project ID for the first project is initialized correctly.
209 	 */
210 	zone_init();
211 	project_init();
212 	brand_init();
213 	kiconv_init();
214 	task_init();
215 	tcache_init();
216 	pool_init();
217 
218 	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
219 
220 	/*
221 	 * Originally, we had two parameters to set default stack
222 	 * size: one for lwp's (lwp_default_stksize), and one for
223 	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
224 	 * Now we have a third parameter that overrides both if it is
225 	 * set to a legal stack size, called default_stksize.
226 	 */
227 
228 	if (default_stksize == 0) {
229 		default_stksize = DEFAULTSTKSZ;
230 	} else if (default_stksize % PAGESIZE != 0 ||
231 	    default_stksize > MAX_STKSIZE ||
232 	    default_stksize < MIN_STKSIZE) {
233 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
234 		    (int)DEFAULTSTKSZ);
235 		default_stksize = DEFAULTSTKSZ;
236 	} else {
237 		lwp_default_stksize = default_stksize;
238 	}
239 
240 	if (lwp_default_stksize == 0) {
241 		lwp_default_stksize = default_stksize;
242 	} else if (lwp_default_stksize % PAGESIZE != 0 ||
243 	    lwp_default_stksize > MAX_STKSIZE ||
244 	    lwp_default_stksize < MIN_STKSIZE) {
245 		cmn_err(CE_WARN, "Illegal stack size. Using %d",
246 		    default_stksize);
247 		lwp_default_stksize = default_stksize;
248 	}
249 
250 	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
251 	    lwp_default_stksize,
252 	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));
253 
254 	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
255 	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);
256 
257 	(void) getcid(sys_name, &syscid);
258 	curthread->t_cid = syscid;	/* current thread is t0 */
259 
260 	/*
261 	 * Set up the first CPU's idle thread.
262 	 * It runs whenever the CPU has nothing worthwhile to do.
263 	 */
264 	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
265 	cpu->cpu_idle_thread = tp;
266 	tp->t_preempt = 1;
267 	tp->t_disp_queue = cpu->cpu_disp;
268 	ASSERT(tp->t_disp_queue != NULL);
269 	tp->t_bound_cpu = cpu;
270 	tp->t_affinitycnt = 1;
271 
272 	/*
273 	 * Registering a thread in the callback table is usually
274 	 * done in the initialization code of the thread. In this
275 	 * case, we do it right after thread creation to avoid
276 	 * blocking idle thread while registering itself. It also
277 	 * avoids the possibility of reregistration in case a CPU
278 	 * restarts its idle thread.
279 	 */
280 	CALLB_CPR_INIT_SAFE(tp, "idle");
281 
282 	/*
283 	 * Create the thread_reaper daemon. From this point on, exited
284 	 * threads will get reaped.
285 	 */
286 	(void) thread_create(NULL, 0, (void (*)())thread_reaper,
287 	    NULL, 0, &p0, TS_RUN, minclsyspri);
288 
289 	/*
290 	 * Finish initializing the kernel memory allocator now that
291 	 * thread_create() is available.
292 	 */
293 	kmem_thread_init();
294 
295 	if (boothowto & RB_DEBUG)
296 		kdi_dvec_thravail();
297 }
298 
299 /*
300  * Create a thread.
301  *
302  * thread_create() blocks for memory if necessary.  It never fails.
303  *
304  * If stk is NULL, the thread is created at the base of the stack
305  * and cannot be swapped.
306  */
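/*
 * A minimal usage sketch (my_daemon is a hypothetical function): a caller
 * that just needs a kernel-only service thread typically does
 *
 *	(void) thread_create(NULL, 0, my_daemon, NULL, 0, &p0,
 *	    TS_RUN, minclsyspri);
 *
 * which allocates a default-sized stack from segkp and starts the thread
 * running in the system scheduling class.
 */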
307 kthread_t *
308 thread_create(
309 	caddr_t	stk,
310 	size_t	stksize,
311 	void	(*proc)(),
312 	void	*arg,
313 	size_t	len,
314 	proc_t	 *pp,
315 	int	state,
316 	pri_t	pri)
317 {
318 	kthread_t *t;
319 	extern struct classfuncs sys_classfuncs;
320 	turnstile_t *ts;
321 
322 	/*
323 	 * Every thread keeps a turnstile around in case it needs to block.
324 	 * The only reason the turnstile is not simply part of the thread
325 	 * structure is that we may have to break the association whenever
326 	 * more than one thread blocks on a given synchronization object.
327 	 * From a memory-management standpoint, turnstiles are like the
328 	 * "attached mblks" that hang off dblks in the streams allocator.
329 	 */
330 	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
331 
332 	if (stk == NULL) {
333 		/*
334 		 * alloc both thread and stack in segkp chunk
335 		 */
336 
337 		if (stksize < default_stksize)
338 			stksize = default_stksize;
339 
340 		if (stksize == default_stksize) {
341 			stk = (caddr_t)segkp_cache_get(segkp_thread);
342 		} else {
343 			stksize = roundup(stksize, PAGESIZE);
344 			stk = (caddr_t)segkp_get(segkp, stksize,
345 			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
346 		}
347 
348 		ASSERT(stk != NULL);
349 
350 		/*
351 		 * The machine-dependent mutex code may require that
352 		 * thread pointers (since they may be used for mutex owner
353 		 * fields) have certain alignment requirements.
354 		 * PTR24_ALIGN is the size of the alignment quanta.
355 		 * XXX - assumes stack grows toward low addresses.
356 		 */
357 		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
358 			cmn_err(CE_PANIC, "thread_create: proposed stack size"
359 			    " too small to hold thread.");
360 #ifdef STACK_GROWTH_DOWN
361 		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
362 		stksize &= -PTR24_ALIGN;	/* make thread aligned */
363 		t = (kthread_t *)(stk + stksize);
364 		bzero(t, sizeof (kthread_t));
365 		if (audit_active)
366 			audit_thread_create(t);
367 		t->t_stk = stk + stksize;
368 		t->t_stkbase = stk;
369 #else	/* stack grows to larger addresses */
370 		stksize -= SA(sizeof (kthread_t));
371 		t = (kthread_t *)(stk);
372 		bzero(t, sizeof (kthread_t));
373 		t->t_stk = stk + sizeof (kthread_t);
374 		t->t_stkbase = stk + stksize + sizeof (kthread_t);
375 #endif	/* STACK_GROWTH_DOWN */
376 		t->t_flag |= T_TALLOCSTK;
377 		t->t_swap = stk;
378 	} else {
379 		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
380 		bzero(t, sizeof (kthread_t));
381 		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
382 		if (audit_active)
383 			audit_thread_create(t);
384 		/*
385 		 * Initialize t_stk to the kernel stack pointer to use
386 		 * upon entry to the kernel
387 		 */
388 #ifdef STACK_GROWTH_DOWN
389 		t->t_stk = stk + stksize;
390 		t->t_stkbase = stk;
391 #else
392 		t->t_stk = stk;			/* 3b2-like */
393 		t->t_stkbase = stk + stksize;
394 #endif /* STACK_GROWTH_DOWN */
395 	}
396 
397 	/* set default stack flag */
398 	if (stksize == lwp_default_stksize)
399 		t->t_flag |= T_DFLTSTK;
400 
401 	t->t_ts = ts;
402 
403 	/*
404 	 * p_cred could be NULL if thread_create() is called before cred_init()
405 	 * is called in main().
406 	 */
407 	mutex_enter(&pp->p_crlock);
408 	if (pp->p_cred)
409 		crhold(t->t_cred = pp->p_cred);
410 	mutex_exit(&pp->p_crlock);
411 	t->t_start = gethrestime_sec();
412 	t->t_startpc = proc;
413 	t->t_procp = pp;
414 	t->t_clfuncs = &sys_classfuncs.thread;
415 	t->t_cid = syscid;
416 	t->t_pri = pri;
417 	t->t_stime = lbolt;
418 	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
419 	t->t_bind_cpu = PBIND_NONE;
420 	t->t_bind_pset = PS_NONE;
421 	t->t_plockp = &pp->p_lock;
422 	t->t_copyops = NULL;
423 	t->t_taskq = NULL;
424 	t->t_anttime = 0;
425 	t->t_hatdepth = 0;
426 
427 	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */
428 
429 	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
430 #ifndef NPROBE
431 	/* Kernel probe */
432 	tnf_thread_create(t);
433 #endif /* NPROBE */
434 	LOCK_INIT_CLEAR(&t->t_lock);
435 
436 	/*
437 	 * Callers who give us a NULL proc must do their own
438 	 * stack initialization.  e.g. lwp_create()
439 	 */
440 	if (proc != NULL) {
441 		t->t_stk = thread_stk_init(t->t_stk);
442 		thread_load(t, proc, arg, len);
443 	}
444 
445 	/*
446 	 * Put a hold on project0. If this thread is actually in a
447 	 * different project, then t_proj will be changed later in
448 	 * lwp_create().  All kernel-only threads must be in project 0.
449 	 */
450 	t->t_proj = project_hold(proj0p);
451 
452 	lgrp_affinity_init(&t->t_lgrp_affinity);
453 
454 	mutex_enter(&pidlock);
455 	nthread++;
456 	t->t_did = next_t_id++;
457 	t->t_prev = curthread->t_prev;
458 	t->t_next = curthread;
459 
460 	/*
461 	 * Add the thread to the list of all threads, and initialize
462 	 * its t_cpu pointer.  We need to block preemption since
463 	 * cpu_offline walks the thread list looking for threads
464 	 * with t_cpu pointing to the CPU being offlined.  We want
465 	 * to make sure that the list is consistent and that if t_cpu
466 	 * is set, the thread is on the list.
467 	 */
468 	kpreempt_disable();
469 	curthread->t_prev->t_next = t;
470 	curthread->t_prev = t;
471 
472 	/*
473 	 * Threads should never have a NULL t_cpu pointer so assign it
474 	 * here.  If the thread is being created with state TS_RUN a
475 	 * better CPU may be chosen when it is placed on the run queue.
476 	 *
477 	 * We need to keep kernel preemption disabled when setting all
478 	 * three fields to keep them in sync.  Also, always create in
479 	 * the default partition since that's where kernel threads go
480 	 * (if this isn't a kernel thread, t_cpupart will be changed
481 	 * in lwp_create before setting the thread runnable).
482 	 */
483 	t->t_cpupart = &cp_default;
484 
485 	/*
486 	 * For now, affiliate this thread with the root lgroup.
487 	 * Since the kernel does not (presently) allocate its memory
488 	 * in a locality aware fashion, the root is an appropriate home.
489 	 * If this thread is later associated with an lwp, it will have
490 	 * its lgroup re-assigned at that time.
491 	 */
492 	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
493 
494 	/*
495 	 * Inherit the current cpu.  If this cpu isn't part of the chosen
496 	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
497 	 * is ready to run.
498 	 */
499 	if (CPU->cpu_part == &cp_default)
500 		t->t_cpu = CPU;
501 	else
502 		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
503 		    t->t_pri, NULL);
504 
505 	t->t_disp_queue = t->t_cpu->cpu_disp;
506 	kpreempt_enable();
507 
508 	/*
509 	 * Initialize thread state and the dispatcher lock pointer.
510 	 * Need to hold onto pidlock to block allthreads walkers until
511 	 * the state is set.
512 	 */
513 	switch (state) {
514 	case TS_RUN:
515 		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
516 		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
517 		CL_SETRUN(t);
518 		thread_unlock(t);
519 		break;
520 
521 	case TS_ONPROC:
522 		THREAD_ONPROC(t, t->t_cpu);
523 		break;
524 
525 	case TS_FREE:
526 		/*
527 		 * Free state will be used for intr threads.
528 		 * The interrupt routine must set the thread dispatcher
529 		 * lock pointer (t_lockp) if starting on a CPU
530 		 * other than the current one.
531 		 */
532 		THREAD_FREEINTR(t, CPU);
533 		break;
534 
535 	case TS_STOPPED:
536 		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
537 		break;
538 
539 	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
540 		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
541 	}
542 	mutex_exit(&pidlock);
543 	return (t);
544 }
545 
546 /*
547  * Move thread to project0 and take care of project reference counters.
548  */
549 void
550 thread_rele(kthread_t *t)
551 {
552 	kproject_t *kpj;
553 
554 	thread_lock(t);
555 
556 	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
557 	kpj = ttoproj(t);
558 	t->t_proj = proj0p;
559 
560 	thread_unlock(t);
561 
562 	if (kpj != proj0p) {
563 		project_rele(kpj);
564 		(void) project_hold(proj0p);
565 	}
566 }
567 
568 void
569 thread_exit(void)
570 {
571 	kthread_t *t = curthread;
572 
573 	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
574 		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");
575 
576 	tsd_exit();		/* Clean up this thread's TSD */
577 
578 	kcpc_passivate();	/* clean up performance counter state */
579 
580 	/*
581 	 * No kernel thread should have called poll() without arranging
582 	 * for pollcleanup() to be called here.
583 	 */
584 	ASSERT(t->t_pollstate == NULL);
585 	ASSERT(t->t_schedctl == NULL);
586 	if (t->t_door)
587 		door_slam();	/* in case thread did an upcall */
588 
589 #ifndef NPROBE
590 	/* Kernel probe */
591 	if (t->t_tnf_tpdp)
592 		tnf_thread_exit();
593 #endif /* NPROBE */
594 
595 	thread_rele(t);
596 	t->t_preempt++;
597 
598 	/*
599 	 * remove thread from the all threads list so that
600 	 * death-row can use the same pointers.
601 	 */
602 	mutex_enter(&pidlock);
603 	t->t_next->t_prev = t->t_prev;
604 	t->t_prev->t_next = t->t_next;
605 	ASSERT(allthreads != t);	/* t0 never exits */
606 	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
607 	mutex_exit(&pidlock);
608 
609 	if (t->t_ctx != NULL)
610 		exitctx(t);
611 	if (t->t_procp->p_pctx != NULL)
612 		exitpctx(t->t_procp);
613 
614 	t->t_state = TS_ZOMB;	/* set zombie thread */
615 
616 	swtch_from_zombie();	/* give up the CPU */
617 	/* NOTREACHED */
618 }
619 
620 /*
621  * Check to see if the specified thread is active (defined as being on
622  * the thread list).  This is certainly a slow way to do this; if there's
623  * ever a reason to speed it up, we could maintain a hash table of active
624  * threads indexed by their t_did.
625  */
626 static kthread_t *
627 did_to_thread(kt_did_t tid)
628 {
629 	kthread_t *t;
630 
631 	ASSERT(MUTEX_HELD(&pidlock));
632 	for (t = curthread->t_next; t != curthread; t = t->t_next) {
633 		if (t->t_did == tid)
634 			break;
635 	}
636 	if (t->t_did == tid)
637 		return (t);
638 	else
639 		return (NULL);
640 }
641 
642 /*
643  * Wait for specified thread to exit.  Returns immediately if the thread
644  * could not be found, meaning that it has either already exited or never
645  * existed.
646  */
647 void
648 thread_join(kt_did_t tid)
649 {
650 	kthread_t *t;
651 
652 	ASSERT(tid != curthread->t_did);
653 	ASSERT(tid != t0.t_did);
654 
655 	mutex_enter(&pidlock);
656 	/*
657 	 * Make sure we check that the thread is on the thread list
658 	 * before blocking on it; otherwise we could end up blocking on
659 	 * a cv that's already been freed.  In other words, don't cache
660 	 * the thread pointer across calls to cv_wait.
661 	 *
662 	 * The choice of loop invariant means that whenever a thread
663 	 * is taken off the allthreads list, a cv_broadcast must be
664 	 * performed on that thread's t_joincv to wake up any waiters.
665 	 * The broadcast doesn't have to happen right away, but it
666 	 * shouldn't be postponed indefinitely (e.g., by doing it in
667 	 * thread_free(), which may only be executed when the deathrow
668 	 * queue is processed).
669 	 */
670 	while (t = did_to_thread(tid))
671 		cv_wait(&t->t_joincv, &pidlock);
672 	mutex_exit(&pidlock);
673 }
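
/*
 * A sketch of the usual create/join pattern (worker and arg are
 * hypothetical, and the sketch assumes the worker does not exit before
 * its t_did is captured):
 *
 *	kthread_t *t = thread_create(NULL, 0, worker, arg, 0, &p0,
 *	    TS_RUN, minclsyspri);
 *	kt_did_t did = t->t_did;
 *	...
 *	thread_join(did);	(returns once worker calls thread_exit())
 *
 * The t_did, not the kthread_t pointer, is what must be remembered,
 * since the thread structure may be reaped as soon as the thread exits.
 */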
674 
675 void
676 thread_free_prevent(kthread_t *t)
677 {
678 	kmutex_t *lp;
679 
680 	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
681 	mutex_enter(lp);
682 }
683 
684 void
685 thread_free_allow(kthread_t *t)
686 {
687 	kmutex_t *lp;
688 
689 	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
690 	mutex_exit(lp);
691 }
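
/*
 * Illustrative pattern: code that must examine a thread which may be
 * exiting (the tick accounting code is the intended client) brackets the
 * access so that thread_free() stalls in thread_free_barrier() until the
 * examination is complete:
 *
 *	thread_free_prevent(t);
 *	...inspect *t...
 *	thread_free_allow(t);
 */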
692 
693 static void
694 thread_free_barrier(kthread_t *t)
695 {
696 	kmutex_t *lp;
697 
698 	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
699 	mutex_enter(lp);
700 	mutex_exit(lp);
701 }
702 
703 void
704 thread_free(kthread_t *t)
705 {
706 	ASSERT(t != &t0 && t->t_state == TS_FREE);
707 	ASSERT(t->t_door == NULL);
708 	ASSERT(t->t_schedctl == NULL);
709 	ASSERT(t->t_pollstate == NULL);
710 
711 	t->t_pri = 0;
712 	t->t_pc = 0;
713 	t->t_sp = 0;
714 	t->t_wchan0 = NULL;
715 	t->t_wchan = NULL;
716 	if (t->t_cred != NULL) {
717 		crfree(t->t_cred);
718 		t->t_cred = 0;
719 	}
720 	if (t->t_pdmsg) {
721 		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
722 		t->t_pdmsg = NULL;
723 	}
724 	if (audit_active)
725 		audit_thread_free(t);
726 #ifndef NPROBE
727 	if (t->t_tnf_tpdp)
728 		tnf_thread_free(t);
729 #endif /* NPROBE */
730 	if (t->t_cldata) {
731 		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
732 	}
733 	if (t->t_rprof != NULL) {
734 		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
735 		t->t_rprof = NULL;
736 	}
737 	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
738 	if (t->t_lwp)
739 		lwp_freeregs(t->t_lwp, 0);
740 	if (t->t_ctx)
741 		freectx(t, 0);
742 	t->t_stk = NULL;
743 	if (t->t_lwp)
744 		lwp_stk_fini(t->t_lwp);
745 	lock_clear(&t->t_lock);
746 
747 	if (t->t_ts->ts_waiters > 0)
748 		panic("thread_free: turnstile still active");
749 
750 	kmem_cache_free(turnstile_cache, t->t_ts);
751 
752 	free_afd(&t->t_activefd);
753 
754 	/*
755 	 * Barrier for the tick accounting code.  The tick accounting code
756 	 * holds this lock to keep the thread from going away while it's
757 	 * looking at it.
758 	 */
759 	thread_free_barrier(t);
760 
761 	ASSERT(ttoproj(t) == proj0p);
762 	project_rele(ttoproj(t));
763 
764 	lgrp_affinity_free(&t->t_lgrp_affinity);
765 
766 	/*
767 	 * Free thread struct and its stack.
768 	 */
769 	if (t->t_flag & T_TALLOCSTK) {
770 		/* thread struct is embedded in stack */
771 		segkp_release(segkp, t->t_swap);
772 		mutex_enter(&pidlock);
773 		nthread--;
774 		mutex_exit(&pidlock);
775 	} else {
776 		if (t->t_swap) {
777 			segkp_release(segkp, t->t_swap);
778 			t->t_swap = NULL;
779 		}
780 		if (t->t_lwp) {
781 			kmem_cache_free(lwp_cache, t->t_lwp);
782 			t->t_lwp = NULL;
783 		}
784 		mutex_enter(&pidlock);
785 		nthread--;
786 		mutex_exit(&pidlock);
787 		kmem_cache_free(thread_cache, t);
788 	}
789 }
790 
791 /*
792  * Removes threads associated with the given zone from a deathrow queue.
793  * tp is a pointer to the head of the deathrow queue, and countp is a
794  * pointer to the current deathrow count.  Returns a linked list of
795  * threads removed from the list.
796  */
797 static kthread_t *
798 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
799 {
800 	kthread_t *tmp, *list = NULL;
801 	cred_t *cr;
802 
803 	ASSERT(MUTEX_HELD(&reaplock));
804 	while (*tp != NULL) {
805 		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
806 			tmp = *tp;
807 			*tp = tmp->t_forw;
808 			tmp->t_forw = list;
809 			list = tmp;
810 			(*countp)--;
811 		} else {
812 			tp = &(*tp)->t_forw;
813 		}
814 	}
815 	return (list);
816 }
817 
818 static void
819 thread_reap_list(kthread_t *t)
820 {
821 	kthread_t *next;
822 
823 	while (t != NULL) {
824 		next = t->t_forw;
825 		thread_free(t);
826 		t = next;
827 	}
828 }
829 
830 /* ARGSUSED */
831 static void
832 thread_zone_destroy(zoneid_t zoneid, void *unused)
833 {
834 	kthread_t *t, *l;
835 
836 	mutex_enter(&reaplock);
837 	/*
838 	 * Pull threads and lwps associated with zone off deathrow lists.
839 	 */
840 	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
841 	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
842 	mutex_exit(&reaplock);
843 
844 	/*
845 	 * Guard against race condition in mutex_owner_running:
846 	 * 	thread=owner(mutex)
847 	 * 	<interrupt>
848 	 * 				thread exits mutex
849 	 * 				thread exits
850 	 * 				thread reaped
851 	 * 				thread struct freed
852 	 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
853 	 * A cross call to all cpus will cause the interrupt handler
854 	 * to reset the PC if it is in mutex_owner_running, refreshing
855 	 * stale thread pointers.
856 	 */
857 	mutex_sync();   /* sync with mutex code */
858 
859 	/*
860 	 * Reap threads
861 	 */
862 	thread_reap_list(t);
863 
864 	/*
865 	 * Reap lwps
866 	 */
867 	thread_reap_list(l);
868 }
869 
870 /*
871  * cleanup zombie threads that are on deathrow.
872  */
873 void
874 thread_reaper()
875 {
876 	kthread_t *t, *l;
877 	callb_cpr_t cprinfo;
878 
879 	/*
880 	 * Register callback to clean up threads when zone is destroyed.
881 	 */
882 	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
883 
884 	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
885 	for (;;) {
886 		mutex_enter(&reaplock);
887 		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
888 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
889 			cv_wait(&reaper_cv, &reaplock);
890 			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
891 		}
892 		/*
893 		 * mutex_sync() needs to be called when reaping, but
894 		 * not too often, so the reaping rate is limited to once
895 		 * per second.  reaplimit is the deathrow count that wakes
896 		 * the reaper; it does not limit thread destruction/creation.
897 		 */
898 		t = thread_deathrow;
899 		l = lwp_deathrow;
900 		thread_deathrow = NULL;
901 		lwp_deathrow = NULL;
902 		thread_reapcnt = 0;
903 		lwp_reapcnt = 0;
904 		mutex_exit(&reaplock);
905 
906 		/*
907 		 * Guard against race condition in mutex_owner_running:
908 		 * 	thread=owner(mutex)
909 		 * 	<interrupt>
910 		 * 				thread exits mutex
911 		 * 				thread exits
912 		 * 				thread reaped
913 		 * 				thread struct freed
914 		 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
915 		 * A cross call to all cpus will cause the interrupt handler
916 		 * to reset the PC if it is in mutex_owner_running, refreshing
917 		 * stale thread pointers.
918 		 */
919 		mutex_sync();   /* sync with mutex code */
920 		/*
921 		 * Reap threads
922 		 */
923 		thread_reap_list(t);
924 
925 		/*
926 		 * Reap lwps
927 		 */
928 		thread_reap_list(l);
929 		delay(hz);
930 	}
931 }
932 
933 /*
934  * This is called by lwp_create(), etc., to put an lwp_deathrow thread onto
935  * thread_deathrow.  The thread's state has already been changed to TS_FREE
936  * to indicate that it is reapable.  The caller already holds the reaplock,
937  * and the lwp has already been freed.
938  */
939 void
940 reapq_move_lq_to_tq(kthread_t *t)
941 {
942 	ASSERT(t->t_state == TS_FREE);
943 	ASSERT(MUTEX_HELD(&reaplock));
944 	t->t_forw = thread_deathrow;
945 	thread_deathrow = t;
946 	thread_reapcnt++;
947 	if (lwp_reapcnt + thread_reapcnt > reaplimit)
948 		cv_signal(&reaper_cv);  /* wake the reaper */
949 }
950 
951 /*
952  * This is called by resume() to put a zombie thread onto deathrow.
953  * The thread's state is changed to TS_FREE to indicate that it is reapable.
954  * This is called from the idle thread so it must not block - just spin.
955  */
956 void
957 reapq_add(kthread_t *t)
958 {
959 	mutex_enter(&reaplock);
960 
961 	/*
962 	 * lwp_deathrow contains only threads with lwp linkage
963 	 * that are of the default stacksize. Anything else goes
964 	 * on thread_deathrow.
965 	 */
966 	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
967 		t->t_forw = lwp_deathrow;
968 		lwp_deathrow = t;
969 		lwp_reapcnt++;
970 	} else {
971 		t->t_forw = thread_deathrow;
972 		thread_deathrow = t;
973 		thread_reapcnt++;
974 	}
975 	if (lwp_reapcnt + thread_reapcnt > reaplimit)
976 		cv_signal(&reaper_cv);	/* wake the reaper */
977 	t->t_state = TS_FREE;
978 	lock_clear(&t->t_lock);
979 
980 	/*
981 	 * Before we return, we need to grab and drop the thread lock for
982 	 * the dead thread.  At this point, the current thread is the idle
983 	 * thread, and the dead thread's CPU lock points to the current
984 	 * CPU -- and we must grab and drop the lock to synchronize with
985 	 * a racing thread walking a blocking chain that the zombie thread
986 	 * was recently in.  By this point, that blocking chain is (by
987 	 * definition) stale:  the dead thread is not holding any locks, and
988 	 * is therefore not in any blocking chains -- but if we do not regrab
989 	 * our lock before freeing the dead thread's data structures, the
990 	 * thread walking the (stale) blocking chain will die on memory
991 	 * corruption when it attempts to drop the dead thread's lock.  We
992 	 * only need do this once because there is no way for the dead thread
993 	 * to ever again be on a blocking chain:  once we have grabbed and
994 	 * dropped the thread lock, we are guaranteed that anyone that could
995 	 * have seen this thread in a blocking chain can no longer see it.
996 	 */
997 	thread_lock(t);
998 	thread_unlock(t);
999 
1000 	mutex_exit(&reaplock);
1001 }
1002 
1003 /*
1004  * Install thread context ops for the current thread.
1005  */
1006 void
1007 installctx(
1008 	kthread_t *t,
1009 	void	*arg,
1010 	void	(*save)(void *),
1011 	void	(*restore)(void *),
1012 	void	(*fork)(void *, void *),
1013 	void	(*lwp_create)(void *, void *),
1014 	void	(*exit)(void *),
1015 	void	(*free)(void *, int))
1016 {
1017 	struct ctxop *ctx;
1018 
1019 	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
1020 	ctx->save_op = save;
1021 	ctx->restore_op = restore;
1022 	ctx->fork_op = fork;
1023 	ctx->lwp_create_op = lwp_create;
1024 	ctx->exit_op = exit;
1025 	ctx->free_op = free;
1026 	ctx->arg = arg;
1027 	ctx->next = t->t_ctx;
1028 	t->t_ctx = ctx;
1029 }
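
/*
 * Illustrative use (my_save, my_restore and my_arg are hypothetical): a
 * client that must save and restore per-thread hardware state around
 * context switches might do
 *
 *	installctx(curthread, my_arg, my_save, my_restore,
 *	    NULL, NULL, NULL, NULL);
 *	...
 *	(void) removectx(curthread, my_arg, my_save, my_restore,
 *	    NULL, NULL, NULL, NULL);
 *
 * removectx() only removes an entry whose operations and argument all
 * match those passed to installctx().
 */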
1030 
1031 /*
1032  * Remove the thread context ops from a thread.
1033  */
1034 int
1035 removectx(
1036 	kthread_t *t,
1037 	void	*arg,
1038 	void	(*save)(void *),
1039 	void	(*restore)(void *),
1040 	void	(*fork)(void *, void *),
1041 	void	(*lwp_create)(void *, void *),
1042 	void	(*exit)(void *),
1043 	void	(*free)(void *, int))
1044 {
1045 	struct ctxop *ctx, *prev_ctx;
1046 
1047 	/*
1048 	 * The incoming kthread_t (which is the thread for which the
1049 	 * context ops will be removed) should be one of the following:
1050 	 *
1051 	 * a) the current thread,
1052 	 *
1053 	 * b) a thread of a process that's being forked (SIDL),
1054 	 *
1055 	 * c) a thread that belongs to the same process as the current
1056 	 *    thread and for which the current thread is the agent thread,
1057 	 *
1058 	 * d) a thread that is TS_STOPPED which is indicative of it
1059 	 *    being (if curthread is not an agent) a thread being created
1060 	 *    as part of an lwp creation.
1061 	 */
1062 	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
1063 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1064 
1065 	/*
1066 	 * Serialize modifications to t->t_ctx to prevent the agent thread
1067 	 * and the target thread from racing with each other during lwp exit.
1068 	 */
1069 	mutex_enter(&t->t_ctx_lock);
1070 	prev_ctx = NULL;
1071 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
1072 		if (ctx->save_op == save && ctx->restore_op == restore &&
1073 		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
1074 		    ctx->exit_op == exit && ctx->free_op == free &&
1075 		    ctx->arg == arg) {
1076 			if (prev_ctx)
1077 				prev_ctx->next = ctx->next;
1078 			else
1079 				t->t_ctx = ctx->next;
1080 			mutex_exit(&t->t_ctx_lock);
1081 			if (ctx->free_op != NULL)
1082 				(ctx->free_op)(ctx->arg, 0);
1083 			kmem_free(ctx, sizeof (struct ctxop));
1084 			return (1);
1085 		}
1086 		prev_ctx = ctx;
1087 	}
1088 	mutex_exit(&t->t_ctx_lock);
1089 
1090 	return (0);
1091 }
1092 
1093 void
1094 savectx(kthread_t *t)
1095 {
1096 	struct ctxop *ctx;
1097 
1098 	ASSERT(t == curthread);
1099 	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
1100 		if (ctx->save_op != NULL)
1101 			(ctx->save_op)(ctx->arg);
1102 }
1103 
1104 void
1105 restorectx(kthread_t *t)
1106 {
1107 	struct ctxop *ctx;
1108 
1109 	ASSERT(t == curthread);
1110 	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
1111 		if (ctx->restore_op != NULL)
1112 			(ctx->restore_op)(ctx->arg);
1113 }
1114 
1115 void
1116 forkctx(kthread_t *t, kthread_t *ct)
1117 {
1118 	struct ctxop *ctx;
1119 
1120 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1121 		if (ctx->fork_op != NULL)
1122 			(ctx->fork_op)(t, ct);
1123 }
1124 
1125 /*
1126  * Note that this operator is only invoked via the _lwp_create
1127  * system call.  The system may have other reasons to create lwps
1128  * e.g. the agent lwp or the doors unreferenced lwp.
1129  */
1130 void
1131 lwp_createctx(kthread_t *t, kthread_t *ct)
1132 {
1133 	struct ctxop *ctx;
1134 
1135 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1136 		if (ctx->lwp_create_op != NULL)
1137 			(ctx->lwp_create_op)(t, ct);
1138 }
1139 
1140 /*
1141  * exitctx is called from thread_exit() and lwp_exit() to perform any actions
1142  * needed when the thread/LWP leaves the processor for the last time. This
1143  * routine is not intended to deal with freeing memory; freectx() is used for
1144  * that purpose during thread_free(). This routine is provided to allow for
1145  * clean-up that can't wait until thread_free().
1146  */
1147 void
1148 exitctx(kthread_t *t)
1149 {
1150 	struct ctxop *ctx;
1151 
1152 	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
1153 		if (ctx->exit_op != NULL)
1154 			(ctx->exit_op)(t);
1155 }
1156 
1157 /*
1158  * freectx is called from thread_free() and exec() to get
1159  * rid of old thread context ops.
1160  */
1161 void
1162 freectx(kthread_t *t, int isexec)
1163 {
1164 	struct ctxop *ctx;
1165 
1166 	while ((ctx = t->t_ctx) != NULL) {
1167 		t->t_ctx = ctx->next;
1168 		if (ctx->free_op != NULL)
1169 			(ctx->free_op)(ctx->arg, isexec);
1170 		kmem_free(ctx, sizeof (struct ctxop));
1171 	}
1172 }
1173 
1174 /*
1175  * freectx_ctx is called from lwp_create() when lwp is reused from
1176  * lwp_deathrow and its thread structure is added to thread_deathrow.
1177  * The thread structure to which this ctx was attached may already have
1178  * been freed by the thread reaper, so free_op implementations shouldn't
1179  * rely on that thread structure still being around.
1180  */
1181 void
1182 freectx_ctx(struct ctxop *ctx)
1183 {
1184 	struct ctxop *nctx;
1185 
1186 	ASSERT(ctx != NULL);
1187 
1188 	do {
1189 		nctx = ctx->next;
1190 		if (ctx->free_op != NULL)
1191 			(ctx->free_op)(ctx->arg, 0);
1192 		kmem_free(ctx, sizeof (struct ctxop));
1193 	} while ((ctx = nctx) != NULL);
1194 }
1195 
1196 /*
1197  * Set the thread running; arrange for it to be swapped in if necessary.
1198  */
1199 void
1200 setrun_locked(kthread_t *t)
1201 {
1202 	ASSERT(THREAD_LOCK_HELD(t));
1203 	if (t->t_state == TS_SLEEP) {
1204 		/*
1205 		 * Take off sleep queue.
1206 		 */
1207 		SOBJ_UNSLEEP(t->t_sobj_ops, t);
1208 	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
1209 		/*
1210 		 * Already on dispatcher queue.
1211 		 */
1212 		return;
1213 	} else if (t->t_state == TS_WAIT) {
1214 		waitq_setrun(t);
1215 	} else if (t->t_state == TS_STOPPED) {
1216 		/*
1217 		 * All of the sending of SIGCONT (TC_XSTART) and /proc
1218 		 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
1219 		 * requested that the thread be run.
1220 		 * Just calling setrun() is not sufficient to set a stopped
1221 		 * thread running.  TS_XSTART is always set if the thread
1222 		 * is not stopped by a jobcontrol stop signal.
1223 		 * TS_PSTART is always set if /proc is not controlling it.
1224 		 * TS_CSTART is always set if lwp_suspend() didn't stop it.
1225 		 * The thread won't be stopped unless one of these
1226 		 * three mechanisms did it.
1227 		 *
1228 		 * These flags must be set before calling setrun_locked(t).
1229 		 * They can't be passed as arguments because the streams
1230 		 * code calls setrun() indirectly and the mechanism for
1231 		 * doing so admits only one argument.  Note that the
1232 		 * thread must be locked in order to change t_schedflags.
1233 		 */
1234 		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
1235 			return;
1236 		/*
1237 		 * Process is no longer stopped (a thread is running).
1238 		 */
1239 		t->t_whystop = 0;
1240 		t->t_whatstop = 0;
1241 		/*
1242 		 * Strictly speaking, we do not have to clear these
1243 		 * flags here; they are cleared on entry to stop().
1244 		 * However, they are confusing when doing kernel
1245 		 * debugging or when they are revealed by ps(1).
1246 		 */
1247 		t->t_schedflag &= ~TS_ALLSTART;
1248 		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
1249 		ASSERT(t->t_lockp == &transition_lock);
1250 		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
1251 		/*
1252 		 * Let the class put the process on the dispatcher queue.
1253 		 */
1254 		CL_SETRUN(t);
1255 	}
1256 }
1257 
1258 void
1259 setrun(kthread_t *t)
1260 {
1261 	thread_lock(t);
1262 	setrun_locked(t);
1263 	thread_unlock(t);
1264 }
1265 
1266 /*
1267  * Unpin an interrupted thread.
1268  *	When an interrupt occurs, the interrupt is handled on the stack
1269  *	of an interrupt thread, taken from a pool linked to the CPU structure.
1270  *
1271  *	When swtch() is switching away from an interrupt thread because it
1272  *	blocked or was preempted, this routine is called to complete the
1273  *	saving of the interrupted thread state, and returns the interrupted
1274  *	thread pointer so it may be resumed.
1275  *
1276  *	Called by swtch() only at high spl.
1277  */
1278 kthread_t *
1279 thread_unpin()
1280 {
1281 	kthread_t	*t = curthread;	/* current thread */
1282 	kthread_t	*itp;		/* interrupted thread */
1283 	int		i;		/* interrupt level */
1284 	extern int	intr_passivate();
1285 
1286 	ASSERT(t->t_intr != NULL);
1287 
1288 	itp = t->t_intr;		/* interrupted thread */
1289 	t->t_intr = NULL;		/* clear interrupt ptr */
1290 
1291 	/*
1292 	 * Get state from interrupt thread for the one
1293 	 * it interrupted.
1294 	 */
1295 
1296 	i = intr_passivate(t, itp);
1297 
1298 	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1299 	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1300 	    i, t, t, itp, itp);
1301 
1302 	/*
1303 	 * Dissociate the current thread from the interrupted thread's LWP.
1304 	 */
1305 	t->t_lwp = NULL;
1306 
1307 	/*
1308 	 * Interrupt handlers above the level that spinlocks block must
1309 	 * not block.
1310 	 */
1311 #if DEBUG
1312 	if (i < 0 || i > LOCK_LEVEL)
1313 		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
1314 #endif
1315 
1316 	/*
1317 	 * Compute the CPU's base interrupt level based on the active
1318 	 * interrupts.
1319 	 */
1320 	ASSERT(CPU->cpu_intr_actv & (1 << i));
1321 	set_base_spl();
1322 
1323 	return (itp);
1324 }
1325 
1326 /*
1327  * Create and initialize an interrupt thread.
1328  *	Cannot fail, since thread_create() never fails.
1329  *	Called at spl7() or better.
1330  */
1331 void
1332 thread_create_intr(struct cpu *cp)
1333 {
1334 	kthread_t *tp;
1335 
1336 	tp = thread_create(NULL, 0,
1337 	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);
1338 
1339 	/*
1340 	 * Set the thread in the TS_FREE state.  The state will change
1341 	 * to TS_ONPROC only while the interrupt is active.  Think of these
1342 	 * as being on a private free list for the CPU.  Being TS_FREE keeps
1343 	 * inactive interrupt threads out of debugger thread lists.
1344 	 *
1345 	 * We cannot call thread_create with TS_FREE because of the current
1346 	 * checks there for ONPROC.  Fix this when thread_create takes flags.
1347 	 */
1348 	THREAD_FREEINTR(tp, cp);
1349 
1350 	/*
1351 	 * Nobody should ever reference the credentials of an interrupt
1352 	 * thread so make it NULL to catch any such references.
1353 	 */
1354 	tp->t_cred = NULL;
1355 	tp->t_flag |= T_INTR_THREAD;
1356 	tp->t_cpu = cp;
1357 	tp->t_bound_cpu = cp;
1358 	tp->t_disp_queue = cp->cpu_disp;
1359 	tp->t_affinitycnt = 1;
1360 	tp->t_preempt = 1;
1361 
1362 	/*
1363 	 * Don't make a user-requested binding on this thread so that
1364 	 * the processor can be offlined.
1365 	 */
1366 	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
1367 	tp->t_bind_pset = PS_NONE;
1368 
1369 #if defined(__i386) || defined(__amd64)
1370 	tp->t_stk -= STACK_ALIGN;
1371 	*(tp->t_stk) = 0;		/* terminate intr thread stack */
1372 #endif
1373 
1374 	/*
1375 	 * Link onto CPU's interrupt pool.
1376 	 */
1377 	tp->t_link = cp->cpu_intr_thread;
1378 	cp->cpu_intr_thread = tp;
1379 }
1380 
1381 /*
1382  * TSD -- THREAD SPECIFIC DATA
1383  */
1384 static kmutex_t		tsd_mutex;	 /* linked list spin lock */
1385 static uint_t		tsd_nkeys;	 /* size of destructor array */
1386 /* per-key destructor funcs */
1387 static void 		(**tsd_destructor)(void *);
1388 /* list of tsd_thread's */
1389 static struct tsd_thread	*tsd_list;
1390 
1391 /*
1392  * Default destructor
1393  *	Needed because NULL destructor means that the key is unused
1394  */
1395 /* ARGSUSED */
1396 void
1397 tsd_defaultdestructor(void *value)
1398 {}
1399 
1400 /*
1401  * Create a key (index into per thread array)
1402  *	Locks out tsd_create, tsd_destroy, and tsd_exit
1403  *	May allocate memory with lock held
1404  */
1405 void
1406 tsd_create(uint_t *keyp, void (*destructor)(void *))
1407 {
1408 	int	i;
1409 	uint_t	nkeys;
1410 
1411 	/*
1412 	 * if key is allocated, do nothing
1413 	 */
1414 	mutex_enter(&tsd_mutex);
1415 	if (*keyp) {
1416 		mutex_exit(&tsd_mutex);
1417 		return;
1418 	}
1419 	/*
1420 	 * find an unused key
1421 	 */
1422 	if (destructor == NULL)
1423 		destructor = tsd_defaultdestructor;
1424 
1425 	for (i = 0; i < tsd_nkeys; ++i)
1426 		if (tsd_destructor[i] == NULL)
1427 			break;
1428 
1429 	/*
1430 	 * if no unused keys, increase the size of the destructor array
1431 	 */
1432 	if (i == tsd_nkeys) {
1433 		if ((nkeys = (tsd_nkeys << 1)) == 0)
1434 			nkeys = 1;
1435 		tsd_destructor =
1436 		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
1437 		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
1438 		    (size_t)(nkeys * sizeof (void (*)(void *))));
1439 		tsd_nkeys = nkeys;
1440 	}
1441 
1442 	/*
1443 	 * allocate the next available unused key
1444 	 */
1445 	tsd_destructor[i] = destructor;
1446 	*keyp = i + 1;
1447 	mutex_exit(&tsd_mutex);
1448 }
1449 
1450 /*
1451  * Destroy a key -- this is for unloadable modules
1452  *
1453  * Assumes that the caller is preventing tsd_set and tsd_get
1454  * Locks out tsd_create, tsd_destroy, and tsd_exit
1455  * May free memory with lock held
1456  */
1457 void
1458 tsd_destroy(uint_t *keyp)
1459 {
1460 	uint_t key;
1461 	struct tsd_thread *tsd;
1462 
1463 	/*
1464 	 * protect the key namespace and our destructor lists
1465 	 */
1466 	mutex_enter(&tsd_mutex);
1467 	key = *keyp;
1468 	*keyp = 0;
1469 
1470 	ASSERT(key <= tsd_nkeys);
1471 
1472 	/*
1473 	 * if the key is valid
1474 	 */
1475 	if (key != 0) {
1476 		uint_t k = key - 1;
1477 		/*
1478 		 * for every thread with TSD, call key's destructor
1479 		 */
1480 		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
1481 			/*
1482 			 * no TSD for key in this thread
1483 			 */
1484 			if (key > tsd->ts_nkeys)
1485 				continue;
1486 			/*
1487 			 * call destructor for key
1488 			 */
1489 			if (tsd->ts_value[k] && tsd_destructor[k])
1490 				(*tsd_destructor[k])(tsd->ts_value[k]);
1491 			/*
1492 			 * reset value for key
1493 			 */
1494 			tsd->ts_value[k] = NULL;
1495 		}
1496 		/*
1497 		 * actually free the key (NULL destructor == unused)
1498 		 */
1499 		tsd_destructor[k] = NULL;
1500 	}
1501 
1502 	mutex_exit(&tsd_mutex);
1503 }
1504 
1505 /*
1506  * Quickly return the per thread value that was stored with the specified key
1507  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1508  */
1509 void *
1510 tsd_get(uint_t key)
1511 {
1512 	return (tsd_agent_get(curthread, key));
1513 }
1514 
1515 /*
1516  * Set a per thread value indexed with the specified key
1517  */
1518 int
1519 tsd_set(uint_t key, void *value)
1520 {
1521 	return (tsd_agent_set(curthread, key, value));
1522 }
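
/*
 * An illustrative TSD life cycle (my_key, my_destructor and my_data are
 * hypothetical):
 *
 *	static uint_t my_key;
 *
 *	tsd_create(&my_key, my_destructor);	(once, e.g. at module load)
 *	(void) tsd_set(my_key, my_data);	(per thread)
 *	my_data = tsd_get(my_key);		(per thread)
 *	tsd_destroy(&my_key);			(at module unload)
 *
 * The destructor runs from tsd_exit() for each exiting thread that still
 * has a value for the key, and from tsd_destroy() for every thread that
 * has TSD for the key.
 */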
1523 
1524 /*
1525  * Like tsd_get(), except that the agent lwp can get the tsd of
1526  * another thread in the same process (the agent thread only runs when the
1527  * process is completely stopped by /proc), or syslwp is creating a new lwp.
1528  */
1529 void *
1530 tsd_agent_get(kthread_t *t, uint_t key)
1531 {
1532 	struct tsd_thread *tsd = t->t_tsd;
1533 
1534 	ASSERT(t == curthread ||
1535 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1536 
1537 	if (key && tsd != NULL && key <= tsd->ts_nkeys)
1538 		return (tsd->ts_value[key - 1]);
1539 	return (NULL);
1540 }
1541 
1542 /*
1543  * Like tsd_set(), except that the agent lwp can set the tsd of
1544  * another thread in the same process, or syslwp can set the tsd
1545  * of a thread it's in the middle of creating.
1546  *
1547  * Assumes the caller is protecting key from tsd_create and tsd_destroy
1548  * May lock out tsd_destroy (and tsd_create), may allocate memory with
1549  * lock held
1550  */
1551 int
1552 tsd_agent_set(kthread_t *t, uint_t key, void *value)
1553 {
1554 	struct tsd_thread *tsd = t->t_tsd;
1555 
1556 	ASSERT(t == curthread ||
1557 	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);
1558 
1559 	if (key == 0)
1560 		return (EINVAL);
1561 	if (tsd == NULL)
1562 		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1563 	if (key <= tsd->ts_nkeys) {
1564 		tsd->ts_value[key - 1] = value;
1565 		return (0);
1566 	}
1567 
1568 	ASSERT(key <= tsd_nkeys);
1569 
1570 	/*
1571 	 * lock out tsd_destroy()
1572 	 */
1573 	mutex_enter(&tsd_mutex);
1574 	if (tsd->ts_nkeys == 0) {
1575 		/*
1576 		 * Link onto list of threads with TSD
1577 		 */
1578 		if ((tsd->ts_next = tsd_list) != NULL)
1579 			tsd_list->ts_prev = tsd;
1580 		tsd_list = tsd;
1581 	}
1582 
1583 	/*
1584 	 * Allocate thread local storage and set the value for key
1585 	 */
1586 	tsd->ts_value = tsd_realloc(tsd->ts_value,
1587 	    tsd->ts_nkeys * sizeof (void *),
1588 	    key * sizeof (void *));
1589 	tsd->ts_nkeys = key;
1590 	tsd->ts_value[key - 1] = value;
1591 	mutex_exit(&tsd_mutex);
1592 
1593 	return (0);
1594 }
1595 
1596 
1597 /*
1598  * Return the per thread value that was stored with the specified key
1599  *	If necessary, create the key and the value
1600  *	Assumes the caller is protecting *keyp from tsd_destroy
1601  */
1602 void *
1603 tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
1604 {
1605 	void *value;
1606 	uint_t key = *keyp;
1607 	struct tsd_thread *tsd = curthread->t_tsd;
1608 
1609 	if (tsd == NULL)
1610 		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
1611 	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
1612 		return (value);
1613 	if (key == 0)
1614 		tsd_create(keyp, destroy);
1615 	(void) tsd_set(*keyp, value = (*allocate)());
1616 
1617 	return (value);
1618 }
1619 
1620 /*
1621  * Called from thread_exit() to run the destructor function for each tsd
1622  *	Locks out tsd_create and tsd_destroy
1623  *	Assumes that the destructor *DOES NOT* use tsd
1624  */
1625 void
1626 tsd_exit(void)
1627 {
1628 	int i;
1629 	struct tsd_thread *tsd = curthread->t_tsd;
1630 
1631 	if (tsd == NULL)
1632 		return;
1633 
1634 	if (tsd->ts_nkeys == 0) {
1635 		kmem_free(tsd, sizeof (*tsd));
1636 		curthread->t_tsd = NULL;
1637 		return;
1638 	}
1639 
1640 	/*
1641 	 * lock out tsd_create and tsd_destroy, call
1642 	 * the destructor, and mark the value as destroyed.
1643 	 */
1644 	mutex_enter(&tsd_mutex);
1645 
1646 	for (i = 0; i < tsd->ts_nkeys; i++) {
1647 		if (tsd->ts_value[i] && tsd_destructor[i])
1648 			(*tsd_destructor[i])(tsd->ts_value[i]);
1649 		tsd->ts_value[i] = NULL;
1650 	}
1651 
1652 	/*
1653 	 * remove from linked list of threads with TSD
1654 	 */
1655 	if (tsd->ts_next)
1656 		tsd->ts_next->ts_prev = tsd->ts_prev;
1657 	if (tsd->ts_prev)
1658 		tsd->ts_prev->ts_next = tsd->ts_next;
1659 	if (tsd_list == tsd)
1660 		tsd_list = tsd->ts_next;
1661 
1662 	mutex_exit(&tsd_mutex);
1663 
1664 	/*
1665 	 * free up the TSD
1666 	 */
1667 	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
1668 	kmem_free(tsd, sizeof (struct tsd_thread));
1669 	curthread->t_tsd = NULL;
1670 }
1671 
1672 /*
1673  * realloc
1674  */
1675 static void *
1676 tsd_realloc(void *old, size_t osize, size_t nsize)
1677 {
1678 	void *new;
1679 
1680 	new = kmem_zalloc(nsize, KM_SLEEP);
1681 	if (old) {
1682 		bcopy(old, new, osize);
1683 		kmem_free(old, osize);
1684 	}
1685 	return (new);
1686 }
1687 
1688 /*
1689  * Check to see if an interrupt thread might be active at a given ipl.
1690  * If so return true.
1691  * We must be conservative--it is ok to give a false yes, but a false no
1692  * will cause disaster.  (But if the situation changes after we check, it is
1693  * ok--the caller is trying to ensure that an interrupt routine has been
1694  * exited).
1695  * This is used when trying to remove an interrupt handler from an autovector
1696  * list in avintr.c.
1697  */
1698 int
1699 intr_active(struct cpu *cp, int level)
1700 {
1701 	if (level <= LOCK_LEVEL)
1702 		return (cp->cpu_thread != cp->cpu_dispthread);
1703 	else
1704 		return (CPU_ON_INTR(cp));
1705 }
1706 
1707 /*
1708  * Return non-zero if an interrupt is being serviced.
1709  */
1710 int
1711 servicing_interrupt()
1712 {
1713 	int onintr = 0;
1714 
1715 	/* Are we an interrupt thread */
1716 	if (curthread->t_flag & T_INTR_THREAD)
1717 		return (1);
1718 	/* Are we servicing a high level interrupt? */
1719 	if (CPU_ON_INTR(CPU)) {
1720 		kpreempt_disable();
1721 		onintr = CPU_ON_INTR(CPU);
1722 		kpreempt_enable();
1723 	}
1724 	return (onintr);
1725 }
1726 
1727 
1728 /*
1729  * Change the dispatch priority of a thread in the system.
1730  * Used when raising or lowering a thread's priority.
1731  * (E.g., priority inheritance)
1732  *
1733  * Since threads are queued according to their priority, we
1734  * must check the thread's state to determine whether it
1735  * is on a queue somewhere. If it is, we've got to:
1736  *
1737  *	o Dequeue the thread.
1738  *	o Change its effective priority.
1739  *	o Enqueue the thread.
1740  *
1741  * Assumptions: The thread whose priority we wish to change
1742  * must be locked before we call thread_change_(e)pri().
1743  * The thread_change_(e)pri() functions don't drop the thread
1744  * lock--that must be done by its caller.
1745  */
1746 void
1747 thread_change_epri(kthread_t *t, pri_t disp_pri)
1748 {
1749 	uint_t	state;
1750 
1751 	ASSERT(THREAD_LOCK_HELD(t));
1752 
1753 	/*
1754 	 * If the inherited priority hasn't actually changed,
1755 	 * just return.
1756 	 */
1757 	if (t->t_epri == disp_pri)
1758 		return;
1759 
1760 	state = t->t_state;
1761 
1762 	/*
1763 	 * If it's not on a queue, change the priority with
1764 	 * impunity.
1765 	 */
1766 	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1767 		t->t_epri = disp_pri;
1768 
1769 		if (state == TS_ONPROC) {
1770 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1771 
1772 			if (t == cp->cpu_dispthread)
1773 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1774 		}
1775 		return;
1776 	}
1777 
1778 	/*
1779 	 * It's either on a sleep queue or a run queue.
1780 	 */
1781 	if (state == TS_SLEEP) {
1782 		/*
1783 		 * Take the thread out of its sleep queue.
1784 		 * Change the inherited priority.
1785 		 * Re-enqueue the thread.
1786 		 * Each synchronization object exports a function
1787 		 * to do this in an appropriate manner.
1788 		 */
1789 		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
1790 	} else if (state == TS_WAIT) {
1791 		/*
1792 		 * Re-enqueue a thread on the wait queue if its
1793 		 * effective priority needs to change.
1794 		 */
1795 		if (disp_pri != t->t_epri)
1796 			waitq_change_pri(t, disp_pri);
1797 	} else {
1798 		/*
1799 		 * The thread is on a run queue.
1800 		 * Note: setbackdq() may not put the thread
1801 		 * back on the same run queue where it originally
1802 		 * resided.
1803 		 */
1804 		(void) dispdeq(t);
1805 		t->t_epri = disp_pri;
1806 		setbackdq(t);
1807 	}
1808 }	/* end of thread_change_epri */
1809 
1810 /*
1811  * Function: Change the t_pri field of a thread.
1812  * Side Effects: Adjust the thread ordering on a run queue
1813  *		 or sleep queue, if necessary.
1814  * Returns: 1 if the thread was on a run queue, else 0.
1815  */
1816 int
1817 thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
1818 {
1819 	uint_t	state;
1820 	int	on_rq = 0;
1821 
1822 	ASSERT(THREAD_LOCK_HELD(t));
1823 
1824 	state = t->t_state;
1825 	THREAD_WILLCHANGE_PRI(t, disp_pri);
1826 
1827 	/*
1828 	 * If it's not on a queue, change the priority with
1829 	 * impunity.
1830 	 */
1831 	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
1832 		t->t_pri = disp_pri;
1833 
1834 		if (state == TS_ONPROC) {
1835 			cpu_t *cp = t->t_disp_queue->disp_cpu;
1836 
1837 			if (t == cp->cpu_dispthread)
1838 				cp->cpu_dispatch_pri = DISP_PRIO(t);
1839 		}
1840 		return (0);
1841 	}
1842 
1843 	/*
1844 	 * It's either on a sleep queue or a run queue.
1845 	 */
1846 	if (state == TS_SLEEP) {
1847 		/*
1848 		 * If the priority has changed, take the thread out of
1849 		 * its sleep queue and change the priority.
1850 		 * Re-enqueue the thread.
1851 		 * Each synchronization object exports a function
1852 		 * to do this in an appropriate manner.
1853 		 */
1854 		if (disp_pri != t->t_pri)
1855 			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
1856 	} else if (state == TS_WAIT) {
1857 		/*
1858 		 * Re-enqueue a thread on the wait queue if its
1859 		 * priority needs to change.
1860 		 */
1861 		if (disp_pri != t->t_pri)
1862 			waitq_change_pri(t, disp_pri);
1863 	} else {
1864 		/*
1865 		 * The thread is on a run queue.
1866 		 * Note: setbackdq() may not put the thread
1867 		 * back on the same run queue where it originally
1868 		 * resided.
1869 		 *
1870 		 * We still requeue the thread even if the priority
1871 		 * is unchanged to preserve round-robin (and other)
1872 		 * effects between threads of the same priority.
1873 		 */
1874 		on_rq = dispdeq(t);
1875 		ASSERT(on_rq);
1876 		t->t_pri = disp_pri;
1877 		if (front) {
1878 			setfrontdq(t);
1879 		} else {
1880 			setbackdq(t);
1881 		}
1882 	}
1883 	return (on_rq);
1884 }
1885