/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/pcb.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/resource.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/kmem.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/poll.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <c2/audit.h>
#include <sys/tnf.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/brand.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <sys/kiconv.h>

struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
static kthread_t *allthreads = &t0;	/* circular list of all threads */

static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t	*thread_deathrow;	/* circular list of reapable threads */
kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
kmutex_t	reaplock;		/* protects lwp and thread deathrows */
kmutex_t	thread_free_lock;	/* protects clock from reaper */
int	thread_reapcnt = 0;		/* number of threads on deathrow */
int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
int	reaplimit = 16;			/* delay reaping until reaplimit */

extern int nthread;

id_t	syscid;				/* system scheduling class ID */
void	*segkp_thread;			/* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;
static kt_did_t next_t_id = 1;

/*
 * Min/Max stack sizes for stack size parameters
 */
#define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
#define	MIN_STKSIZE	DEFAULTSTKSZ

/*
 * default_stksize overrides lwp_default_stksize if it is set.
 */
int	default_stksize;
int	lwp_default_stksize;

static zone_key_t zone_thread_key;

/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

void thread_reaper(void);

/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (turnstile_t));
	return (0);
}

/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
	turnstile_t *ts = buf;

	ASSERT(ts->ts_free == NULL);
	ASSERT(ts->ts_waiters == 0);
	ASSERT(ts->ts_inheritor == NULL);
	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}

void
thread_init(void)
{
	kthread_t *tp;
	extern char sys_name[];
	extern void idle();
	struct cpu *cpu = CPU;

	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));

#if defined(__i386) || defined(__amd64)
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * "struct _klwp" includes a "struct pcb", which includes a
	 * "struct fpu", which needs to be 16-byte aligned on amd64
	 * (and even on i386 for fxsave/fxrstor).
	 */
	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    16, NULL, NULL, NULL, NULL, NULL, 0);
#else
	/*
	 * Allocate thread structures from static_arena.  This prevents
	 * issues where a thread tries to relocate its own thread
	 * structure and touches it after the mapping has been suspended.
	 */
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

	lwp_stk_cache_init();

	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

	turnstile_cache = kmem_cache_create("turnstile_cache",
	    sizeof (turnstile_t), 0,
	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);

	label_init();
	cred_init();

	/*
	 * Initialize various resource management facilities.
	 */
	rctl_init();
	cpucaps_init();
	/*
	 * zone_init() should be called before project_init() so that the
	 * project ID for the first project is initialized correctly.
	 */
	zone_init();
	project_init();
	brand_init();
	kiconv_init();
	task_init();
	tcache_init();
	pool_init();

	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	/*
	 * Originally, we had two parameters to set default stack
	 * size: one for lwp's (lwp_default_stksize), and one for
	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
	 * Now we have a third parameter that overrides both if it is
	 * set to a legal stack size, called default_stksize.
	 */
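	/*
	 * For example, an administrator might override both stack sizes
	 * at boot from /etc/system (the values shown are illustrative
	 * only; they must be a multiple of PAGESIZE and lie within
	 * [MIN_STKSIZE, MAX_STKSIZE] to pass the checks below):
	 *
	 *	set default_stksize = 0x6000
	 *	set lwp_default_stksize = 0x6000
	 */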
	if (default_stksize == 0) {
		default_stksize = DEFAULTSTKSZ;
	} else if (default_stksize % PAGESIZE != 0 ||
	    default_stksize > MAX_STKSIZE ||
	    default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    (int)DEFAULTSTKSZ);
		default_stksize = DEFAULTSTKSZ;
	} else {
		lwp_default_stksize = default_stksize;
	}

	if (lwp_default_stksize == 0) {
		lwp_default_stksize = default_stksize;
	} else if (lwp_default_stksize % PAGESIZE != 0 ||
	    lwp_default_stksize > MAX_STKSIZE ||
	    lwp_default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    default_stksize);
		lwp_default_stksize = default_stksize;
	}

	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
	    lwp_default_stksize,
	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));

	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);

	(void) getcid(sys_name, &syscid);
	curthread->t_cid = syscid;	/* current thread is t0 */

	/*
	 * Set up the first CPU's idle thread.
	 * It runs whenever the CPU has nothing worthwhile to do.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
	cpu->cpu_idle_thread = tp;
	tp->t_preempt = 1;
	tp->t_disp_queue = cpu->cpu_disp;
	ASSERT(tp->t_disp_queue != NULL);
	tp->t_bound_cpu = cpu;
	tp->t_affinitycnt = 1;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread.  In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while it registers itself.  It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	/*
	 * Create the thread_reaper daemon.  From this point on, exited
	 * threads will get reaped.
	 */
	(void) thread_create(NULL, 0, (void (*)())thread_reaper,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Finish initializing the kernel memory allocator now that
	 * thread_create() is available.
	 */
	kmem_thread_init();

	if (boothowto & RB_DEBUG)
		kdi_dvec_thravail();
}

/*
 * Create a thread.
 *
 * thread_create() blocks for memory if necessary.  It never fails.
 *
 * If stk is NULL, the thread is created at the base of the stack
 * and cannot be swapped.
 */
kthread_t *
thread_create(
	caddr_t	stk,
	size_t	stksize,
	void	(*proc)(),
	void	*arg,
	size_t	len,
	proc_t	*pp,
	int	state,
	pri_t	pri)
{
	kthread_t *t;
	extern struct classfuncs sys_classfuncs;
	turnstile_t *ts;

	/*
	 * Every thread keeps a turnstile around in case it needs to block.
	 * The only reason the turnstile is not simply part of the thread
	 * structure is that we may have to break the association whenever
	 * more than one thread blocks on a given synchronization object.
	 * From a memory-management standpoint, turnstiles are like the
	 * "attached mblks" that hang off dblks in the streams allocator.
	 */
	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	if (stk == NULL) {
		/*
		 * alloc both thread and stack in segkp chunk
		 */

		if (stksize < default_stksize)
			stksize = default_stksize;

		if (stksize == default_stksize) {
			stk = (caddr_t)segkp_cache_get(segkp_thread);
		} else {
			stksize = roundup(stksize, PAGESIZE);
			stk = (caddr_t)segkp_get(segkp, stksize,
			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
		}

		ASSERT(stk != NULL);

		/*
		 * The machine-dependent mutex code may require that
		 * thread pointers (since they may be used for mutex owner
		 * fields) have certain alignment requirements.
		 * PTR24_ALIGN is the size of the alignment quanta.
		 * XXX - assumes stack grows toward low addresses.
		 */
		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
			cmn_err(CE_PANIC, "thread_create: proposed stack size"
			    " too small to hold thread.");
#ifdef STACK_GROWTH_DOWN
		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
		stksize &= -PTR24_ALIGN;	/* make thread aligned */
		t = (kthread_t *)(stk + stksize);
		bzero(t, sizeof (kthread_t));
#ifdef C2_AUDIT
		if (audit_active)
			audit_thread_create(t);
#endif
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else	/* stack grows to larger addresses */
		stksize -= SA(sizeof (kthread_t));
		t = (kthread_t *)(stk);
		bzero(t, sizeof (kthread_t));
		t->t_stk = stk + sizeof (kthread_t);
		t->t_stkbase = stk + stksize + sizeof (kthread_t);
#endif	/* STACK_GROWTH_DOWN */
		t->t_flag |= T_TALLOCSTK;
		t->t_swap = stk;
	} else {
		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
		bzero(t, sizeof (kthread_t));
		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
#ifdef C2_AUDIT
		if (audit_active)
			audit_thread_create(t);
#endif
		/*
		 * Initialize t_stk to the kernel stack pointer to use
		 * upon entry to the kernel
		 */
#ifdef STACK_GROWTH_DOWN
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else
		t->t_stk = stk;			/* 3b2-like */
		t->t_stkbase = stk + stksize;
#endif	/* STACK_GROWTH_DOWN */
	}

	/* set default stack flag */
	if (stksize == lwp_default_stksize)
		t->t_flag |= T_DFLTSTK;

	t->t_ts = ts;

	/*
	 * p_cred could be NULL if thread_create() is called before
	 * cred_init() is called in main().
	 */
	mutex_enter(&pp->p_crlock);
	if (pp->p_cred)
		crhold(t->t_cred = pp->p_cred);
	mutex_exit(&pp->p_crlock);
	t->t_start = gethrestime_sec();
	t->t_startpc = proc;
	t->t_procp = pp;
	t->t_clfuncs = &sys_classfuncs.thread;
	t->t_cid = syscid;
	t->t_pri = pri;
	t->t_stime = lbolt;
	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t->t_bind_cpu = PBIND_NONE;
	t->t_bind_pset = PS_NONE;
	t->t_plockp = &pp->p_lock;
	t->t_copyops = NULL;
	t->t_taskq = NULL;
	t->t_anttime = 0;
	t->t_hatdepth = 0;

	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */

	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
#ifndef NPROBE
	/* Kernel probe */
	tnf_thread_create(t);
#endif /* NPROBE */
	LOCK_INIT_CLEAR(&t->t_lock);

	/*
	 * Callers who give us a NULL proc must do their own
	 * stack initialization, e.g. lwp_create().
	 */
	if (proc != NULL) {
		t->t_stk = thread_stk_init(t->t_stk);
		thread_load(t, proc, arg, len);
	}

	/*
	 * Put a hold on project0.  If this thread is actually in a
	 * different project, then t_proj will be changed later in
	 * lwp_create().  All kernel-only threads must be in project 0.
	 */
	t->t_proj = project_hold(proj0p);

	lgrp_affinity_init(&t->t_lgrp_affinity);

	mutex_enter(&pidlock);
	nthread++;
	t->t_did = next_t_id++;
	t->t_prev = curthread->t_prev;
	t->t_next = curthread;

	/*
	 * Add the thread to the list of all threads, and initialize
	 * its t_cpu pointer.  We need to block preemption since
	 * cpu_offline walks the thread list looking for threads
	 * with t_cpu pointing to the CPU being offlined.  We want
	 * to make sure that the list is consistent and that if t_cpu
	 * is set, the thread is on the list.
	 */
	kpreempt_disable();
	curthread->t_prev->t_next = t;
	curthread->t_prev = t;

	/*
	 * Threads should never have a NULL t_cpu pointer so assign it
	 * here.  If the thread is being created with state TS_RUN a
	 * better CPU may be chosen when it is placed on the run queue.
	 *
	 * We need to keep kernel preemption disabled when setting all
	 * three fields to keep them in sync.  Also, always create in
	 * the default partition since that's where kernel threads go
	 * (if this isn't a kernel thread, t_cpupart will be changed
	 * in lwp_create before setting the thread runnable).
	 */
	t->t_cpupart = &cp_default;

	/*
	 * For now, affiliate this thread with the root lgroup.
	 * Since the kernel does not (presently) allocate its memory
	 * in a locality aware fashion, the root is an appropriate home.
	 * If this thread is later associated with an lwp, it will have
	 * its lgroup re-assigned at that time.
	 */
	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

	/*
	 * Inherit the current cpu.  If this cpu isn't part of the chosen
	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
	 * is ready to run.
	 */
	if (CPU->cpu_part == &cp_default)
		t->t_cpu = CPU;
	else
		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
		    t->t_pri, NULL);

	t->t_disp_queue = t->t_cpu->cpu_disp;
	kpreempt_enable();

	/*
	 * Initialize thread state and the dispatcher lock pointer.
	 * Need to hold onto pidlock to block allthreads walkers until
	 * the state is set.
	 */
	switch (state) {
	case TS_RUN:
		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
		CL_SETRUN(t);
		thread_unlock(t);
		break;

	case TS_ONPROC:
		THREAD_ONPROC(t, t->t_cpu);
		break;

	case TS_FREE:
		/*
		 * Free state will be used for intr threads.
		 * The interrupt routine must set the thread dispatcher
		 * lock pointer (t_lockp) if starting on a CPU
		 * other than the current one.
		 */
		THREAD_FREEINTR(t, CPU);
		break;

	case TS_STOPPED:
		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
		break;

	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
	}
	mutex_exit(&pidlock);
	return (t);
}
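/*
 * Example (illustrative only; my_worker and my_arg are hypothetical
 * names): a typical caller creates a kernel-only thread in p0 and lets
 * the dispatcher run it:
 *
 *	static void
 *	my_worker(void *my_arg)
 *	{
 *		... do work ...
 *		thread_exit();
 *	}
 *
 *	(void) thread_create(NULL, 0, (void (*)())my_worker, my_arg,
 *	    0, &p0, TS_RUN, minclsyspri);
 */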
/*
 * Move thread to project0 and take care of project reference counters.
 */
void
thread_rele(kthread_t *t)
{
	kproject_t *kpj;

	thread_lock(t);

	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
	kpj = ttoproj(t);
	t->t_proj = proj0p;

	thread_unlock(t);

	if (kpj != proj0p) {
		project_rele(kpj);
		(void) project_hold(proj0p);
	}
}

void
thread_exit(void)
{
	kthread_t *t = curthread;

	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");

	tsd_exit();		/* Clean up this thread's TSD */

	kcpc_passivate();	/* clean up performance counter state */

	/*
	 * No kernel thread should have called poll() without arranging
	 * for pollcleanup() to be called here.
	 */
	ASSERT(t->t_pollstate == NULL);
	ASSERT(t->t_schedctl == NULL);
	if (t->t_door)
		door_slam();	/* in case thread did an upcall */

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	thread_rele(t);
	t->t_preempt++;

	/*
	 * remove thread from the all threads list so that
	 * death-row can use the same pointers.
	 */
	mutex_enter(&pidlock);
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	ASSERT(allthreads != t);	/* t0 never exits */
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	if (t->t_ctx != NULL)
		exitctx(t);
	if (t->t_procp->p_pctx != NULL)
		exitpctx(t->t_procp);

	t->t_state = TS_ZOMB;	/* set zombie thread */

	swtch_from_zombie();	/* give up the CPU */
	/* NOTREACHED */
}

/*
 * Check to see if the specified thread is active (defined as being on
 * the thread list).  This is certainly a slow way to do this; if there's
 * ever a reason to speed it up, we could maintain a hash table of active
 * threads indexed by their t_did.
 */
static kthread_t *
did_to_thread(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&pidlock));
	for (t = curthread->t_next; t != curthread; t = t->t_next) {
		if (t->t_did == tid)
			break;
	}
	if (t->t_did == tid)
		return (t);
	else
		return (NULL);
}

/*
 * Wait for specified thread to exit.  Returns immediately if the thread
 * could not be found, meaning that it has either already exited or never
 * existed.
 */
void
thread_join(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(tid != curthread->t_did);
	ASSERT(tid != t0.t_did);

	mutex_enter(&pidlock);
	/*
	 * Make sure we check that the thread is on the thread list
	 * before blocking on it; otherwise we could end up blocking on
	 * a cv that's already been freed.  In other words, don't cache
	 * the thread pointer across calls to cv_wait.
	 *
	 * The choice of loop invariant means that whenever a thread
	 * is taken off the allthreads list, a cv_broadcast must be
	 * performed on that thread's t_joincv to wake up any waiters.
	 * The broadcast doesn't have to happen right away, but it
	 * shouldn't be postponed indefinitely (e.g., by doing it in
	 * thread_free which may only be executed when the deathrow
	 * queue is processed).
	 */
	while ((t = did_to_thread(tid)) != NULL)
		cv_wait(&t->t_joincv, &pidlock);
	mutex_exit(&pidlock);
}
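/*
 * Example (illustrative only; worker and arg are hypothetical): a
 * caller that must wait for a thread it created records the thread's
 * t_did and joins on that ID, assuming the new thread cannot exit and
 * be reaped before the ID is recorded:
 *
 *	kthread_t *t = thread_create(NULL, 0, worker, arg, 0, &p0,
 *	    TS_RUN, minclsyspri);
 *	kt_did_t did = t->t_did;
 *	...
 *	thread_join(did);
 */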
void
thread_free(kthread_t *t)
{
	ASSERT(t != &t0 && t->t_state == TS_FREE);
	ASSERT(t->t_door == NULL);
	ASSERT(t->t_schedctl == NULL);
	ASSERT(t->t_pollstate == NULL);

	t->t_pri = 0;
	t->t_pc = 0;
	t->t_sp = 0;
	t->t_wchan0 = NULL;
	t->t_wchan = NULL;
	if (t->t_cred != NULL) {
		crfree(t->t_cred);
		t->t_cred = NULL;
	}
	if (t->t_pdmsg) {
		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
		t->t_pdmsg = NULL;
	}
#ifdef C2_AUDIT
	if (audit_active)
		audit_thread_free(t);
#endif
#ifndef NPROBE
	if (t->t_tnf_tpdp)
		tnf_thread_free(t);
#endif /* NPROBE */
	if (t->t_cldata) {
		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
	}
	if (t->t_rprof != NULL) {
		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
		t->t_rprof = NULL;
	}
	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
	if (t->t_lwp)
		lwp_freeregs(t->t_lwp, 0);
	if (t->t_ctx)
		freectx(t, 0);
	t->t_stk = NULL;
	if (t->t_lwp)
		lwp_stk_fini(t->t_lwp);
	lock_clear(&t->t_lock);

	if (t->t_ts->ts_waiters > 0)
		panic("thread_free: turnstile still active");

	kmem_cache_free(turnstile_cache, t->t_ts);

	free_afd(&t->t_activefd);

	/*
	 * Barrier for clock thread.  The clock holds this lock to
	 * keep the thread from going away while it's looking at it.
	 */
	mutex_enter(&thread_free_lock);
	mutex_exit(&thread_free_lock);

	ASSERT(ttoproj(t) == proj0p);
	project_rele(ttoproj(t));

	lgrp_affinity_free(&t->t_lgrp_affinity);

	/*
	 * Free thread struct and its stack.
	 */
	if (t->t_flag & T_TALLOCSTK) {
		/* thread struct is embedded in stack */
		segkp_release(segkp, t->t_swap);
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
	} else {
		if (t->t_swap) {
			segkp_release(segkp, t->t_swap);
			t->t_swap = NULL;
		}
		if (t->t_lwp) {
			kmem_cache_free(lwp_cache, t->t_lwp);
			t->t_lwp = NULL;
		}
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
		kmem_cache_free(thread_cache, t);
	}
}

/*
 * Removes threads associated with the given zone from a deathrow queue.
 * tp is a pointer to the head of the deathrow queue, and countp is a
 * pointer to the current deathrow count.  Returns a linked list of
 * threads removed from the list.
 */
static kthread_t *
thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
{
	kthread_t *tmp, *list = NULL;
	cred_t *cr;

	ASSERT(MUTEX_HELD(&reaplock));
	while (*tp != NULL) {
		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
			tmp = *tp;
			*tp = tmp->t_forw;
			tmp->t_forw = list;
			list = tmp;
			(*countp)--;
		} else {
			tp = &(*tp)->t_forw;
		}
	}
	return (list);
}

static void
thread_reap_list(kthread_t *t)
{
	kthread_t *next;

	while (t != NULL) {
		next = t->t_forw;
		thread_free(t);
		t = next;
	}
}

/* ARGSUSED */
static void
thread_zone_destroy(zoneid_t zoneid, void *unused)
{
	kthread_t *t, *l;

	mutex_enter(&reaplock);
	/*
	 * Pull threads and lwps associated with zone off deathrow lists.
	 */
	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
	mutex_exit(&reaplock);

	/*
	 * Reap threads
	 */
	thread_reap_list(t);

	/*
	 * Reap lwps
	 */
	thread_reap_list(l);
}

/*
 * cleanup zombie threads that are on deathrow.
 */
void
thread_reaper()
{
	kthread_t *t, *l;
	callb_cpr_t cprinfo;

	/*
	 * Register callback to clean up threads when zone is destroyed.
	 */
	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
	for (;;) {
		mutex_enter(&reaplock);
		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&reaper_cv, &reaplock);
			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
		}
		t = thread_deathrow;
		l = lwp_deathrow;
		thread_deathrow = NULL;
		lwp_deathrow = NULL;
		thread_reapcnt = 0;
		lwp_reapcnt = 0;
		mutex_exit(&reaplock);

		/*
		 * Reap threads
		 */
		thread_reap_list(t);

		/*
		 * Reap lwps
		 */
		thread_reap_list(l);
	}
}

/*
 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is reapable.
 * This is called from the idle thread so it must not block (just spin).
 */
void
reapq_add(kthread_t *t)
{
	mutex_enter(&reaplock);

	/*
	 * lwp_deathrow contains only threads with lwp linkage
	 * that are of the default stacksize.  Anything else goes
	 * on thread_deathrow.
	 */
	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
		t->t_forw = lwp_deathrow;
		lwp_deathrow = t;
		lwp_reapcnt++;
	} else {
		t->t_forw = thread_deathrow;
		thread_deathrow = t;
		thread_reapcnt++;
	}
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
	t->t_state = TS_FREE;
	lock_clear(&t->t_lock);

	/*
	 * Before we return, we need to grab and drop the thread lock for
	 * the dead thread.  At this point, the current thread is the idle
	 * thread, and the dead thread's CPU lock points to the current
	 * CPU -- and we must grab and drop the lock to synchronize with
	 * a racing thread walking a blocking chain that the zombie thread
	 * was recently in.  By this point, that blocking chain is (by
	 * definition) stale: the dead thread is not holding any locks, and
	 * is therefore not in any blocking chains -- but if we do not regrab
	 * our lock before freeing the dead thread's data structures, the
	 * thread walking the (stale) blocking chain will die on memory
	 * corruption when it attempts to drop the dead thread's lock.  We
	 * only need do this once because there is no way for the dead thread
	 * to ever again be on a blocking chain: once we have grabbed and
	 * dropped the thread lock, we are guaranteed that anyone that could
	 * have seen this thread in a blocking chain can no longer see it.
	 */
	thread_lock(t);
	thread_unlock(t);

	mutex_exit(&reaplock);
}

/*
 * Install thread context ops for the current thread.
 */
void
installctx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx;

	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
	ctx->save_op = save;
	ctx->restore_op = restore;
	ctx->fork_op = fork;
	ctx->lwp_create_op = lwp_create;
	ctx->exit_op = exit;
	ctx->free_op = free;
	ctx->arg = arg;
	ctx->next = t->t_ctx;
	t->t_ctx = ctx;
}
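/*
 * Example (illustrative only; the my_* names are hypothetical): a
 * facility that maintains per-thread hardware state can install
 * save/restore handlers on the current thread, and must later remove
 * them with the identical ops/argument tuple, since removectx() below
 * matches on all seven values:
 *
 *	installctx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, my_free);
 *	...
 *	(void) removectx(curthread, my_state, my_save, my_restore,
 *	    NULL, NULL, NULL, my_free);
 */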
/*
 * Remove the thread context ops from a thread.
 */
int
removectx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx, *prev_ctx;

	/*
	 * The incoming kthread_t (which is the thread for which the
	 * context ops will be removed) should be one of the following:
	 *
	 * a) the current thread,
	 *
	 * b) a thread of a process that's being forked (SIDL),
	 *
	 * c) a thread that belongs to the same process as the current
	 *    thread and for which the current thread is the agent thread,
	 *
	 * d) a thread that is TS_STOPPED, which indicates (if curthread
	 *    is not an agent) a thread being created as part of an lwp
	 *    creation.
	 */
	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	/*
	 * Serialize modifications to t->t_ctx to prevent the agent thread
	 * and the target thread from racing with each other during lwp exit.
	 */
	mutex_enter(&t->t_ctx_lock);
	prev_ctx = NULL;
	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
		if (ctx->save_op == save && ctx->restore_op == restore &&
		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
		    ctx->exit_op == exit && ctx->free_op == free &&
		    ctx->arg == arg) {
			if (prev_ctx)
				prev_ctx->next = ctx->next;
			else
				t->t_ctx = ctx->next;
			mutex_exit(&t->t_ctx_lock);
			if (ctx->free_op != NULL)
				(ctx->free_op)(ctx->arg, 0);
			kmem_free(ctx, sizeof (struct ctxop));
			return (1);
		}
		prev_ctx = ctx;
	}
	mutex_exit(&t->t_ctx_lock);

	return (0);
}

void
savectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->save_op != NULL)
			(ctx->save_op)(ctx->arg);
}

void
restorectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->restore_op != NULL)
			(ctx->restore_op)(ctx->arg);
}

void
forkctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->fork_op != NULL)
			(ctx->fork_op)(t, ct);
}

/*
 * Note that this operator is only invoked via the _lwp_create
 * system call.  The system may have other reasons to create lwps,
 * e.g. the agent lwp or the doors unreferenced lwp.
 */
void
lwp_createctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->lwp_create_op != NULL)
			(ctx->lwp_create_op)(t, ct);
}

/*
 * exitctx is called from thread_exit() and lwp_exit() to perform any actions
 * needed when the thread/LWP leaves the processor for the last time.  This
 * routine is not intended to deal with freeing memory; freectx() is used for
 * that purpose during thread_free().  This routine is provided to allow for
 * clean-up that can't wait until thread_free().
 */
void
exitctx(kthread_t *t)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->exit_op != NULL)
			(ctx->exit_op)(t);
}

/*
 * freectx is called from thread_free() and exec() to get
 * rid of old thread context ops.
 */
void
freectx(kthread_t *t, int isexec)
{
	struct ctxop *ctx;

	while ((ctx = t->t_ctx) != NULL) {
		t->t_ctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, isexec);
		kmem_free(ctx, sizeof (struct ctxop));
	}
}

/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	if (t->t_state == TS_SLEEP) {
		/*
		 * Take off sleep queue.
		 */
		SOBJ_UNSLEEP(t->t_sobj_ops, t);
	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
		/*
		 * Already on dispatcher queue.
		 */
		return;
	} else if (t->t_state == TS_WAIT) {
		waitq_setrun(t);
	} else if (t->t_state == TS_STOPPED) {
		/*
		 * All of the sending of SIGCONT (TC_XSTART) and /proc
		 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
		 * requested that the thread be run.
		 * Just calling setrun() is not sufficient to set a stopped
		 * thread running.  TP_TXSTART is always set if the thread
		 * is not stopped by a jobcontrol stop signal.
		 * TP_TPSTART is always set if /proc is not controlling it.
		 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
		 * The thread won't be stopped unless one of these
		 * three mechanisms did it.
		 *
		 * These flags must be set before calling setrun_locked(t).
		 * They can't be passed as arguments because the streams
		 * code calls setrun() indirectly and the mechanism for
		 * doing so admits only one argument.  Note that the
		 * thread must be locked in order to change t_schedflags.
		 */
		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
			return;
		/*
		 * Process is no longer stopped (a thread is running).
		 */
		t->t_whystop = 0;
		t->t_whatstop = 0;
		/*
		 * Strictly speaking, we do not have to clear these
		 * flags here; they are cleared on entry to stop().
		 * However, they are confusing when doing kernel
		 * debugging or when they are revealed by ps(1).
		 */
		t->t_schedflag &= ~TS_ALLSTART;
		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
		ASSERT(t->t_lockp == &transition_lock);
		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
		/*
		 * Let the class put the process on the dispatcher queue.
		 */
		CL_SETRUN(t);
	}
}

void
setrun(kthread_t *t)
{
	thread_lock(t);
	setrun_locked(t);
	thread_unlock(t);
}
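/*
 * Example (illustrative only): a continuation path that owns one of
 * the start mechanisms described above sets its TS_ALLSTART bit under
 * the thread lock before calling setrun_locked(); which flag to set
 * (e.g. TS_CSTART for lwp_continue()) depends on what stopped the
 * thread:
 *
 *	thread_lock(t);
 *	t->t_schedflag |= TS_CSTART;
 *	setrun_locked(t);
 *	thread_unlock(t);
 */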
/*
 * Unpin an interrupted thread.
 *	When an interrupt occurs, the interrupt is handled on the stack
 *	of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 *	When swtch() is switching away from an interrupt thread because it
 *	blocked or was preempted, this routine is called to complete the
 *	saving of the interrupted thread state, and returns the interrupted
 *	thread pointer so it may be resumed.
 *
 *	Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
	kthread_t	*t = curthread;	/* current thread */
	kthread_t	*itp;		/* interrupted thread */
	int		i;		/* interrupt level */
	extern int	intr_passivate();

	ASSERT(t->t_intr != NULL);

	itp = t->t_intr;		/* interrupted thread */
	t->t_intr = NULL;		/* clear interrupt ptr */

	/*
	 * Get state from interrupt thread for the one
	 * it interrupted.
	 */

	i = intr_passivate(t, itp);

	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
	    i, t, t, itp, itp);

	/*
	 * Dissociate the current thread from the interrupted thread's LWP.
	 */
	t->t_lwp = NULL;

	/*
	 * Interrupt handlers above the level that spinlocks block must
	 * not block.
	 */
#if DEBUG
	if (i < 0 || i > LOCK_LEVEL)
		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
#endif

	/*
	 * Compute the CPU's base interrupt level based on the active
	 * interrupts.
	 */
	ASSERT(CPU->cpu_intr_actv & (1 << i));
	set_base_spl();

	return (itp);
}

/*
 * Create and initialize an interrupt thread.
 *	Called at spl7() or better.
 */
void
thread_create_intr(struct cpu *cp)
{
	kthread_t *tp;

	tp = thread_create(NULL, 0,
	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

	/*
	 * Set the thread in the TS_FREE state.  The state will change
	 * to TS_ONPROC only while the interrupt is active.  Think of these
	 * as being on a private free list for the CPU.  Being TS_FREE keeps
	 * inactive interrupt threads out of debugger thread lists.
	 *
	 * We cannot call thread_create with TS_FREE because of the current
	 * checks there for ONPROC.  Fix this when thread_create takes flags.
	 */
	THREAD_FREEINTR(tp, cp);

	/*
	 * Nobody should ever reference the credentials of an interrupt
	 * thread so make it NULL to catch any such references.
	 */
	tp->t_cred = NULL;
	tp->t_flag |= T_INTR_THREAD;
	tp->t_cpu = cp;
	tp->t_bound_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;
	tp->t_affinitycnt = 1;
	tp->t_preempt = 1;

	/*
	 * Don't make a user-requested binding on this thread so that
	 * the processor can be offlined.
	 */
	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
	tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
	tp->t_stk -= STACK_ALIGN;
	*(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

	/*
	 * Link onto CPU's interrupt pool.
	 */
	tp->t_link = cp->cpu_intr_thread;
	cp->cpu_intr_thread = tp;
}

/*
 * TSD -- THREAD SPECIFIC DATA
 */
static kmutex_t	tsd_mutex;	/* linked list spin lock */
static uint_t	tsd_nkeys;	/* size of destructor array */
/* per-key destructor funcs */
static void	(**tsd_destructor)(void *);
/* list of tsd_thread's */
static struct tsd_thread	*tsd_list;

/*
 * Default destructor
 *	Needed because NULL destructor means that the key is unused
 */
/* ARGSUSED */
void
tsd_defaultdestructor(void *value)
{}

/*
 * Create a key (index into per thread array)
 *	Locks out tsd_create, tsd_destroy, and tsd_exit
 *	May allocate memory with lock held
 */
void
tsd_create(uint_t *keyp, void (*destructor)(void *))
{
	int	i;
	uint_t	nkeys;

	/*
	 * if key is allocated, do nothing
	 */
	mutex_enter(&tsd_mutex);
	if (*keyp) {
		mutex_exit(&tsd_mutex);
		return;
	}
	/*
	 * find an unused key
	 */
	if (destructor == NULL)
		destructor = tsd_defaultdestructor;

	for (i = 0; i < tsd_nkeys; ++i)
		if (tsd_destructor[i] == NULL)
			break;

	/*
	 * if no unused keys, increase the size of the destructor array
	 */
	if (i == tsd_nkeys) {
		if ((nkeys = (tsd_nkeys << 1)) == 0)
			nkeys = 1;
		tsd_destructor =
		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
		    (size_t)(nkeys * sizeof (void (*)(void *))));
		tsd_nkeys = nkeys;
	}

	/*
	 * allocate the next available unused key
	 */
	tsd_destructor[i] = destructor;
	*keyp = i + 1;
	mutex_exit(&tsd_mutex);
}

/*
 * Destroy a key -- this is for unloadable modules
 *
 * Assumes that the caller is preventing tsd_set and tsd_get
 * Locks out tsd_create, tsd_destroy, and tsd_exit
 * May free memory with lock held
 */
void
tsd_destroy(uint_t *keyp)
{
	uint_t key;
	struct tsd_thread *tsd;

	/*
	 * protect the key namespace and our destructor lists
	 */
	mutex_enter(&tsd_mutex);
	key = *keyp;
	*keyp = 0;

	ASSERT(key <= tsd_nkeys);

	/*
	 * if the key is valid
	 */
	if (key != 0) {
		uint_t k = key - 1;
		/*
		 * for every thread with TSD, call key's destructor
		 */
		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
			/*
			 * no TSD for key in this thread
			 */
			if (key > tsd->ts_nkeys)
				continue;
			/*
			 * call destructor for key
			 */
			if (tsd->ts_value[k] && tsd_destructor[k])
				(*tsd_destructor[k])(tsd->ts_value[k]);
			/*
			 * reset value for key
			 */
			tsd->ts_value[k] = NULL;
		}
		/*
		 * actually free the key (NULL destructor == unused)
		 */
		tsd_destructor[k] = NULL;
	}

	mutex_exit(&tsd_mutex);
}

/*
 * Quickly return the per thread value that was stored with the specified key
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 */
void *
tsd_get(uint_t key)
{
	return (tsd_agent_get(curthread, key));
}

/*
 * Set a per thread value indexed with the specified key
 */
int
tsd_set(uint_t key, void *value)
{
	return (tsd_agent_set(curthread, key, value));
}
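/*
 * Example (illustrative only; the my_* names are hypothetical): a
 * module typically keeps one global key, creates it once, and stores a
 * per-thread pointer under it; tsd_destroy() at unload time runs
 * my_dtor on every thread's remaining value:
 *
 *	static uint_t my_key;
 *
 *	tsd_create(&my_key, my_dtor);
 *	(void) tsd_set(my_key, my_state);
 *	...
 *	my_state = tsd_get(my_key);
 *	...
 *	tsd_destroy(&my_key);
 */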
/*
 * Like tsd_get(), except that the agent lwp can get the tsd of
 * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or syslwp is creating a new lwp.
 */
void *
tsd_agent_get(kthread_t *t, uint_t key)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key && tsd != NULL && key <= tsd->ts_nkeys)
		return (tsd->ts_value[key - 1]);
	return (NULL);
}

/*
 * Like tsd_set(), except that the agent lwp can set the tsd of
 * another thread in the same process, or syslwp can set the tsd
 * of a thread it's in the middle of creating.
 *
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 * May lock out tsd_destroy (and tsd_create), may allocate memory with
 * lock held
 */
int
tsd_agent_set(kthread_t *t, uint_t key, void *value)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key == 0)
		return (EINVAL);
	if (tsd == NULL)
		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key <= tsd->ts_nkeys) {
		tsd->ts_value[key - 1] = value;
		return (0);
	}

	ASSERT(key <= tsd_nkeys);

	/*
	 * lock out tsd_destroy()
	 */
	mutex_enter(&tsd_mutex);
	if (tsd->ts_nkeys == 0) {
		/*
		 * Link onto list of threads with TSD
		 */
		if ((tsd->ts_next = tsd_list) != NULL)
			tsd_list->ts_prev = tsd;
		tsd_list = tsd;
	}

	/*
	 * Allocate thread local storage and set the value for key
	 */
	tsd->ts_value = tsd_realloc(tsd->ts_value,
	    tsd->ts_nkeys * sizeof (void *),
	    key * sizeof (void *));
	tsd->ts_nkeys = key;
	tsd->ts_value[key - 1] = value;
	mutex_exit(&tsd_mutex);

	return (0);
}


/*
 * Return the per thread value that was stored with the specified key
 *	If necessary, create the key and the value
 *	Assumes the caller is protecting *keyp from tsd_destroy
 */
void *
tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
{
	void *value;
	uint_t key = *keyp;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
		return (value);
	if (key == 0)
		tsd_create(keyp, destroy);
	(void) tsd_set(*keyp, value = (*allocate)());

	return (value);
}

/*
 * Called from thread_exit() to run the destructor function for each tsd
 *	Locks out tsd_create and tsd_destroy
 *	Assumes that the destructor *DOES NOT* use tsd
 */
void
tsd_exit(void)
{
	int i;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		return;

	if (tsd->ts_nkeys == 0) {
		kmem_free(tsd, sizeof (*tsd));
		curthread->t_tsd = NULL;
		return;
	}

	/*
	 * lock out tsd_create and tsd_destroy, call
	 * the destructor, and mark the value as destroyed.
	 */
	mutex_enter(&tsd_mutex);

	for (i = 0; i < tsd->ts_nkeys; i++) {
		if (tsd->ts_value[i] && tsd_destructor[i])
			(*tsd_destructor[i])(tsd->ts_value[i]);
		tsd->ts_value[i] = NULL;
	}

	/*
	 * remove from linked list of threads with TSD
	 */
	if (tsd->ts_next)
		tsd->ts_next->ts_prev = tsd->ts_prev;
	if (tsd->ts_prev)
		tsd->ts_prev->ts_next = tsd->ts_next;
	if (tsd_list == tsd)
		tsd_list = tsd->ts_next;

	mutex_exit(&tsd_mutex);

	/*
	 * free up the TSD
	 */
	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
	kmem_free(tsd, sizeof (struct tsd_thread));
	curthread->t_tsd = NULL;
}

/*
 * realloc
 */
static void *
tsd_realloc(void *old, size_t osize, size_t nsize)
{
	void *new;

	new = kmem_zalloc(nsize, KM_SLEEP);
	if (old) {
		bcopy(old, new, osize);
		kmem_free(old, osize);
	}
	return (new);
}

/*
 * Check to see if an interrupt thread might be active at a given ipl.
 * If so return true.
 * We must be conservative--it is ok to give a false yes, but a false no
 * will cause disaster.  (But if the situation changes after we check it is
 * ok--the caller is trying to ensure that an interrupt routine has been
 * exited).
 * This is used when trying to remove an interrupt handler from an
 * autovector list in avintr.c.
 */
int
intr_active(struct cpu *cp, int level)
{
	if (level <= LOCK_LEVEL)
		return (cp->cpu_thread != cp->cpu_dispthread);
	else
		return (CPU_ON_INTR(cp));
}

/*
 * Return non-zero if an interrupt is being serviced.
 */
int
servicing_interrupt()
{
	int onintr = 0;

	/* Are we an interrupt thread */
	if (curthread->t_flag & T_INTR_THREAD)
		return (1);
	/* Are we servicing a high level interrupt? */
	if (CPU_ON_INTR(CPU)) {
		kpreempt_disable();
		onintr = CPU_ON_INTR(CPU);
		kpreempt_enable();
	}
	return (onintr);
}


/*
 * Change the dispatch priority of a thread in the system.
 * Used when raising or lowering a thread's priority.
 * (E.g., priority inheritance)
 *
 * Since threads are queued according to their priority, we
 * must check the thread's state to determine whether it
 * is on a queue somewhere.  If it is, we've got to:
 *
 *	o Dequeue the thread.
 *	o Change its effective priority.
 *	o Enqueue the thread.
 *
 * Assumptions: The thread whose priority we wish to change
 * must be locked before we call thread_change_(e)pri().
 * The thread_change(e)pri() function doesn't drop the thread
 * lock--that must be done by its caller.
 */
void
thread_change_epri(kthread_t *t, pri_t disp_pri)
{
	uint_t	state;

	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * If the inherited priority hasn't actually changed,
	 * just return.
	 */
	if (t->t_epri == disp_pri)
		return;

	state = t->t_state;

	/*
	 * If it's not on a queue, change the priority with
	 * impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_epri = disp_pri;

		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
		return;
	}

	/*
	 * It's either on a sleep queue or a run queue.
	 */
	if (state == TS_SLEEP) {
		/*
		 * Take the thread out of its sleep queue.
		 * Change the inherited priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * effective priority needs to change.
		 */
		if (disp_pri != t->t_epri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 */
		(void) dispdeq(t);
		t->t_epri = disp_pri;
		setbackdq(t);
	}
}	/* end of thread_change_epri */
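/*
 * Example (illustrative only): per the assumptions above, callers such
 * as the priority-inheritance code hold the thread lock across the
 * change and drop it themselves:
 *
 *	thread_lock(t);
 *	thread_change_epri(t, new_pri);
 *	thread_unlock(t);
 */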
/*
 * Function: Change the t_pri field of a thread.
 * Side Effects: Adjust the thread ordering on a run queue
 *		 or sleep queue, if necessary.
 * Returns: 1 if the thread was on a run queue, else 0.
 */
int
thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
{
	uint_t	state;
	int	on_rq = 0;

	ASSERT(THREAD_LOCK_HELD(t));

	state = t->t_state;
	THREAD_WILLCHANGE_PRI(t, disp_pri);

	/*
	 * If it's not on a queue, change the priority with
	 * impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_pri = disp_pri;

		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
		return (0);
	}

	/*
	 * It's either on a sleep queue or a run queue.
	 */
	if (state == TS_SLEEP) {
		/*
		 * If the priority has changed, take the thread out of
		 * its sleep queue and change the priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		if (disp_pri != t->t_pri)
			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * priority needs to change.
		 */
		if (disp_pri != t->t_pri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 *
		 * We still requeue the thread even if the priority
		 * is unchanged to preserve round-robin (and other)
		 * effects between threads of the same priority.
		 */
		on_rq = dispdeq(t);
		ASSERT(on_rq);
		t->t_pri = disp_pri;
		if (front) {
			setfrontdq(t);
		} else {
			setbackdq(t);
		}
	}
	return (on_rq);
}