/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/pcb.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/var.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/resource.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/kmem.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/poll.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <c2/audit.h>
#include <sys/tnf.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/brand.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>

struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
static kthread_t *allthreads = &t0;	/* circular list of all threads */

static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t	*thread_deathrow;	/* circular list of reapable threads */
kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
kmutex_t	reaplock;		/* protects lwp and thread deathrows */
kmutex_t	thread_free_lock;	/* protects clock from reaper */
int	thread_reapcnt = 0;		/* number of threads on deathrow */
int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
int	reaplimit = 16;			/* delay reaping until reaplimit */

extern int nthread;

id_t	syscid;				/* system scheduling class ID */
void	*segkp_thread;			/* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;
static kt_did_t next_t_id = 1;

/*
 * Min/Max stack sizes for stack size parameters
 */
#define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
#define	MIN_STKSIZE	DEFAULTSTKSZ

/*
 * default_stksize overrides lwp_default_stksize if it is set.
 */
int	default_stksize;
int	lwp_default_stksize;
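/*
 * Both values can be tuned from /etc/system; for example, a line such
 * as (the value here is purely illustrative)
 *
 *	set default_stksize = 0x6000
 *
 * raises the default kernel stack size.  thread_init() below rejects
 * settings that are not page-aligned or that fall outside
 * [MIN_STKSIZE, MAX_STKSIZE], and falls back to DEFAULTSTKSZ.
 */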
static zone_key_t zone_thread_key;

/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (turnstile_t));
	return (0);
}

/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
	turnstile_t *ts = buf;

	ASSERT(ts->ts_free == NULL);
	ASSERT(ts->ts_waiters == 0);
	ASSERT(ts->ts_inheritor == NULL);
	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}
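/*
 * A sketch of the kmem cache protocol these routines follow: objects
 * are returned to the cache in constructed (quiescent) state, so the
 * destructor can simply assert that state rather than tear it down:
 *
 *	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);
 *	...use and quiesce the turnstile...
 *	kmem_cache_free(turnstile_cache, ts);
 */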
void
thread_init(void)
{
	kthread_t *tp;
	extern char sys_name[];
	extern void idle();
	struct cpu *cpu = CPU;

	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));

#if defined(__i386) || defined(__amd64)
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * "struct _klwp" includes a "struct pcb", which includes a
	 * "struct fpu", which needs to be 16-byte aligned on amd64
	 * (and even on i386 for fxsave/fxrstor).
	 */
	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    16, NULL, NULL, NULL, NULL, NULL, 0);
#else
	/*
	 * Allocate thread structures from static_arena.  This prevents
	 * issues where a thread tries to relocate its own thread
	 * structure and touches it after the mapping has been suspended.
	 */
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

	lwp_stk_cache_init();

	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

	turnstile_cache = kmem_cache_create("turnstile_cache",
	    sizeof (turnstile_t), 0,
	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);

	label_init();
	cred_init();

	/*
	 * Initialize various resource management facilities.
	 */
	rctl_init();
	cpucaps_init();
	/*
	 * zone_init() should be called before project_init() so that the
	 * project ID for the first project is initialized correctly.
	 */
	zone_init();
	project_init();
	brand_init();
	task_init();
	tcache_init();
	pool_init();

	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	/*
	 * Originally, we had two parameters to set default stack
	 * size: one for lwp's (lwp_default_stksize), and one for
	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
	 * Now we have a third parameter that overrides both if it is
	 * set to a legal stack size, called default_stksize.
	 */

	if (default_stksize == 0) {
		default_stksize = DEFAULTSTKSZ;
	} else if (default_stksize % PAGESIZE != 0 ||
	    default_stksize > MAX_STKSIZE ||
	    default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    (int)DEFAULTSTKSZ);
		default_stksize = DEFAULTSTKSZ;
	} else {
		lwp_default_stksize = default_stksize;
	}

	if (lwp_default_stksize == 0) {
		lwp_default_stksize = default_stksize;
	} else if (lwp_default_stksize % PAGESIZE != 0 ||
	    lwp_default_stksize > MAX_STKSIZE ||
	    lwp_default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    default_stksize);
		lwp_default_stksize = default_stksize;
	}

	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
	    lwp_default_stksize,
	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));

	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);

	(void) getcid(sys_name, &syscid);
	curthread->t_cid = syscid;	/* current thread is t0 */

	/*
	 * Set up the first CPU's idle thread.
	 * It runs whenever the CPU has nothing worthwhile to do.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
	cpu->cpu_idle_thread = tp;
	tp->t_preempt = 1;
	tp->t_disp_queue = cpu->cpu_disp;
	ASSERT(tp->t_disp_queue != NULL);
	tp->t_bound_cpu = cpu;
	tp->t_affinitycnt = 1;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread.  In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while it registers itself.  It
	 * also avoids the possibility of reregistration in case a
	 * CPU restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	/*
	 * Finish initializing the kernel memory allocator now that
	 * thread_create() is available.
	 */
	kmem_thread_init();

	if (boothowto & RB_DEBUG)
		kdi_dvec_thravail();
}
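/*
 * A typical caller of thread_create() (below) spawns a self-contained
 * kernel service thread like this; my_worker and my_arg are
 * hypothetical, and minclsyspri is the customary priority for kernel
 * service threads:
 *
 *	kthread_t *tp = thread_create(NULL, 0, my_worker, my_arg, 0,
 *	    &p0, TS_RUN, minclsyspri);
 */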
/*
 * Create a thread.
 *
 * thread_create() blocks for memory if necessary.  It never fails.
 *
 * If stk is NULL, the thread is created at the base of the stack
 * and cannot be swapped.
 */
kthread_t *
thread_create(
	caddr_t	stk,
	size_t	stksize,
	void	(*proc)(),
	void	*arg,
	size_t	len,
	proc_t	*pp,
	int	state,
	pri_t	pri)
{
	kthread_t *t;
	extern struct classfuncs sys_classfuncs;
	turnstile_t *ts;

	/*
	 * Every thread keeps a turnstile around in case it needs to block.
	 * The only reason the turnstile is not simply part of the thread
	 * structure is that we may have to break the association whenever
	 * more than one thread blocks on a given synchronization object.
	 * From a memory-management standpoint, turnstiles are like the
	 * "attached mblks" that hang off dblks in the streams allocator.
	 */
	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	if (stk == NULL) {
		/*
		 * alloc both thread and stack in segkp chunk
		 */

		if (stksize < default_stksize)
			stksize = default_stksize;

		if (stksize == default_stksize) {
			stk = (caddr_t)segkp_cache_get(segkp_thread);
		} else {
			stksize = roundup(stksize, PAGESIZE);
			stk = (caddr_t)segkp_get(segkp, stksize,
			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
		}

		ASSERT(stk != NULL);

		/*
		 * The machine-dependent mutex code may require that
		 * thread pointers (since they may be used for mutex owner
		 * fields) have certain alignment requirements.
		 * PTR24_ALIGN is the size of the alignment quanta.
		 * XXX - assumes stack grows toward low addresses.
		 */
		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
			cmn_err(CE_PANIC, "thread_create: proposed stack size"
			    " too small to hold thread.");
#ifdef STACK_GROWTH_DOWN
		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
		stksize &= -PTR24_ALIGN;	/* make thread aligned */
		t = (kthread_t *)(stk + stksize);
		bzero(t, sizeof (kthread_t));
#ifdef C2_AUDIT
		if (audit_active)
			audit_thread_create(t);
#endif
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else	/* stack grows to larger addresses */
		stksize -= SA(sizeof (kthread_t));
		t = (kthread_t *)(stk);
		bzero(t, sizeof (kthread_t));
		t->t_stk = stk + sizeof (kthread_t);
		t->t_stkbase = stk + stksize + sizeof (kthread_t);
#endif	/* STACK_GROWTH_DOWN */
		t->t_flag |= T_TALLOCSTK;
		t->t_swap = stk;
	} else {
		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
		bzero(t, sizeof (kthread_t));
		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
#ifdef C2_AUDIT
		if (audit_active)
			audit_thread_create(t);
#endif
		/*
		 * Initialize t_stk to the kernel stack pointer to use
		 * upon entry to the kernel
		 */
#ifdef STACK_GROWTH_DOWN
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else
		t->t_stk = stk;			/* 3b2-like */
		t->t_stkbase = stk + stksize;
#endif	/* STACK_GROWTH_DOWN */
	}
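	/*
	 * Illustrative layout for the stk == NULL, STACK_GROWTH_DOWN case
	 * (addresses hypothetical): for a segkp chunk [stk, stk + 0x5000),
	 * the kthread_t is carved from the high end, stksize is rounded
	 * down so the thread is PTR24_ALIGN-aligned, and the usable stack
	 * is [t_stkbase, t_stk), sitting just below the thread structure.
	 */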
	/* set default stack flag */
	if (stksize == lwp_default_stksize)
		t->t_flag |= T_DFLTSTK;

	t->t_ts = ts;

	/*
	 * p_cred could be NULL if thread_create is called before cred_init
	 * is called in main.
	 */
	mutex_enter(&pp->p_crlock);
	if (pp->p_cred)
		crhold(t->t_cred = pp->p_cred);
	mutex_exit(&pp->p_crlock);
	t->t_start = gethrestime_sec();
	t->t_startpc = proc;
	t->t_procp = pp;
	t->t_clfuncs = &sys_classfuncs.thread;
	t->t_cid = syscid;
	t->t_pri = pri;
	t->t_stime = lbolt;
	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t->t_bind_cpu = PBIND_NONE;
	t->t_bind_pset = PS_NONE;
	t->t_plockp = &pp->p_lock;
	t->t_copyops = NULL;
	t->t_taskq = NULL;
	t->t_anttime = 0;
	t->t_hatdepth = 0;

	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */

	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
#ifndef NPROBE
	/* Kernel probe */
	tnf_thread_create(t);
#endif /* NPROBE */
	LOCK_INIT_CLEAR(&t->t_lock);

	/*
	 * Callers who give us a NULL proc must do their own
	 * stack initialization.  e.g. lwp_create()
	 */
	if (proc != NULL) {
		t->t_stk = thread_stk_init(t->t_stk);
		thread_load(t, proc, arg, len);
	}

	/*
	 * Put a hold on project0.  If this thread is actually in a
	 * different project, then t_proj will be changed later in
	 * lwp_create().  All kernel-only threads must be in project 0.
	 */
	t->t_proj = project_hold(proj0p);

	lgrp_affinity_init(&t->t_lgrp_affinity);

	mutex_enter(&pidlock);
	nthread++;
	t->t_did = next_t_id++;
	t->t_prev = curthread->t_prev;
	t->t_next = curthread;

	/*
	 * Add the thread to the list of all threads, and initialize
	 * its t_cpu pointer.  We need to block preemption since
	 * cpu_offline walks the thread list looking for threads
	 * with t_cpu pointing to the CPU being offlined.  We want
	 * to make sure that the list is consistent and that if t_cpu
	 * is set, the thread is on the list.
	 */
	kpreempt_disable();
	curthread->t_prev->t_next = t;
	curthread->t_prev = t;

	/*
	 * Threads should never have a NULL t_cpu pointer so assign it
	 * here.  If the thread is being created with state TS_RUN a
	 * better CPU may be chosen when it is placed on the run queue.
	 *
	 * We need to keep kernel preemption disabled when setting all
	 * three fields to keep them in sync.  Also, always create in
	 * the default partition since that's where kernel threads go
	 * (if this isn't a kernel thread, t_cpupart will be changed
	 * in lwp_create before setting the thread runnable).
	 */
	t->t_cpupart = &cp_default;

	/*
	 * For now, affiliate this thread with the root lgroup.
	 * Since the kernel does not (presently) allocate its memory
	 * in a locality aware fashion, the root is an appropriate home.
	 * If this thread is later associated with an lwp, it will have
	 * its lgroup reassigned at that time.
	 */
	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

	/*
	 * Inherit the current cpu.  If this cpu isn't part of the chosen
	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
	 * is ready to run.
	 */
	if (CPU->cpu_part == &cp_default)
		t->t_cpu = CPU;
	else
		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
		    t->t_pri, NULL);

	t->t_disp_queue = t->t_cpu->cpu_disp;
	kpreempt_enable();

	/*
	 * Initialize thread state and the dispatcher lock pointer.
	 * Need to hold onto pidlock to block allthreads walkers until
	 * the state is set.
	 */
	switch (state) {
	case TS_RUN:
		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
		CL_SETRUN(t);
		thread_unlock(t);
		break;

	case TS_ONPROC:
		THREAD_ONPROC(t, t->t_cpu);
		break;

	case TS_FREE:
		/*
		 * Free state will be used for intr threads.
		 * The interrupt routine must set the thread dispatcher
		 * lock pointer (t_lockp) if starting on a CPU
		 * other than the current one.
		 */
		THREAD_FREEINTR(t, CPU);
		break;

	case TS_STOPPED:
		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
		break;

	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
	}
	mutex_exit(&pidlock);
	return (t);
}
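/*
 * Sketch of the canonical walk over the circular allthreads list that
 * the pidlock protocol in thread_create() and thread_exit() protects
 * (cpu_offline() and did_to_thread() below iterate this way):
 *
 *	mutex_enter(&pidlock);
 *	t = curthread;
 *	do {
 *		...examine t...
 *	} while ((t = t->t_next) != curthread);
 *	mutex_exit(&pidlock);
 */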
/*
 * Move thread to project0 and take care of project reference counters.
 */
void
thread_rele(kthread_t *t)
{
	kproject_t *kpj;

	thread_lock(t);

	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
	kpj = ttoproj(t);
	t->t_proj = proj0p;

	thread_unlock(t);

	if (kpj != proj0p) {
		project_rele(kpj);
		(void) project_hold(proj0p);
	}
}


void	(*ip_cleanup_func)(void);

void
thread_exit()
{
	kthread_t *t = curthread;

	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");

	if (ip_cleanup_func != NULL)
		(*ip_cleanup_func)();

	tsd_exit();		/* Clean up this thread's TSD */

	kcpc_passivate();	/* clean up performance counter state */

	/*
	 * No kernel thread should have called poll() without arranging
	 * to call pollcleanup() here.
	 */
	ASSERT(t->t_pollstate == NULL);
	ASSERT(t->t_schedctl == NULL);
	if (t->t_door)
		door_slam();	/* in case thread did an upcall */

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	thread_rele(t);
	t->t_preempt++;

	/*
	 * remove thread from the all threads list so that
	 * death-row can use the same pointers.
	 */
	mutex_enter(&pidlock);
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	ASSERT(allthreads != t);	/* t0 never exits */
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	if (t->t_ctx != NULL)
		exitctx(t);
	if (t->t_procp->p_pctx != NULL)
		exitpctx(t->t_procp);

	t->t_state = TS_ZOMB;	/* set zombie thread */

	swtch_from_zombie();	/* give up the CPU */
	/* NOTREACHED */
}

/*
 * Check to see if the specified thread is active (defined as being on
 * the thread list).  This is certainly a slow way to do this; if there's
 * ever a reason to speed it up, we could maintain a hash table of active
 * threads indexed by their t_did.
 */
static kthread_t *
did_to_thread(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&pidlock));
	for (t = curthread->t_next; t != curthread; t = t->t_next) {
		if (t->t_did == tid)
			break;
	}
	if (t->t_did == tid)
		return (t);
	else
		return (NULL);
}

/*
 * Wait for specified thread to exit.  Returns immediately if the thread
 * could not be found, meaning that it has either already exited or never
 * existed.
 */
void
thread_join(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(tid != curthread->t_did);
	ASSERT(tid != t0.t_did);

	mutex_enter(&pidlock);
	/*
	 * Make sure we check that the thread is on the thread list
	 * before blocking on it; otherwise we could end up blocking on
	 * a cv that's already been freed.  In other words, don't cache
	 * the thread pointer across calls to cv_wait.
	 *
	 * The choice of loop invariant means that whenever a thread
	 * is taken off the allthreads list, a cv_broadcast must be
	 * performed on that thread's t_joincv to wake up any waiters.
	 * The broadcast doesn't have to happen right away, but it
	 * shouldn't be postponed indefinitely (e.g., by doing it in
	 * thread_free which may only be executed when the deathrow
	 * queue is processed).
	 */
	while (t = did_to_thread(tid))
		cv_wait(&t->t_joincv, &pidlock);
	mutex_exit(&pidlock);
}
void
thread_free(kthread_t *t)
{
	ASSERT(t != &t0 && t->t_state == TS_FREE);
	ASSERT(t->t_door == NULL);
	ASSERT(t->t_schedctl == NULL);
	ASSERT(t->t_pollstate == NULL);

	t->t_pri = 0;
	t->t_pc = 0;
	t->t_sp = 0;
	t->t_wchan0 = NULL;
	t->t_wchan = NULL;
	if (t->t_cred != NULL) {
		crfree(t->t_cred);
		t->t_cred = 0;
	}
	if (t->t_pdmsg) {
		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
		t->t_pdmsg = NULL;
	}
#ifdef C2_AUDIT
	if (audit_active)
		audit_thread_free(t);
#endif
#ifndef NPROBE
	if (t->t_tnf_tpdp)
		tnf_thread_free(t);
#endif /* NPROBE */
	if (t->t_cldata) {
		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
	}
	if (t->t_rprof != NULL) {
		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
		t->t_rprof = NULL;
	}
	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
	if (t->t_lwp)
		lwp_freeregs(t->t_lwp, 0);
	if (t->t_ctx)
		freectx(t, 0);
	if (t->t_procp->p_pctx)
		freepctx(t->t_procp, 0);
	t->t_stk = NULL;
	if (t->t_lwp)
		lwp_stk_fini(t->t_lwp);
	lock_clear(&t->t_lock);

	if (t->t_ts->ts_waiters > 0)
		panic("thread_free: turnstile still active");

	kmem_cache_free(turnstile_cache, t->t_ts);

	free_afd(&t->t_activefd);

	/*
	 * Barrier for clock thread.  The clock holds this lock to
	 * keep the thread from going away while it's looking at it.
	 */
	mutex_enter(&thread_free_lock);
	mutex_exit(&thread_free_lock);

	ASSERT(ttoproj(t) == proj0p);
	project_rele(ttoproj(t));

	lgrp_affinity_free(&t->t_lgrp_affinity);

	/*
	 * Free thread struct and its stack.
	 */
	if (t->t_flag & T_TALLOCSTK) {
		/* thread struct is embedded in stack */
		segkp_release(segkp, t->t_swap);
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
	} else {
		if (t->t_swap) {
			segkp_release(segkp, t->t_swap);
			t->t_swap = NULL;
		}
		if (t->t_lwp) {
			kmem_cache_free(lwp_cache, t->t_lwp);
			t->t_lwp = NULL;
		}
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
		kmem_cache_free(thread_cache, t);
	}
}

/*
 * Removes threads associated with the given zone from a deathrow queue.
 * tp is a pointer to the head of the deathrow queue, and countp is a
 * pointer to the current deathrow count.  Returns a linked list of
 * threads removed from the list.
 */
static kthread_t *
thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
{
	kthread_t *tmp, *list = NULL;
	cred_t *cr;

	ASSERT(MUTEX_HELD(&reaplock));
	while (*tp != NULL) {
		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
			tmp = *tp;
			*tp = tmp->t_forw;
			tmp->t_forw = list;
			list = tmp;
			(*countp)--;
		} else {
			tp = &(*tp)->t_forw;
		}
	}
	return (list);
}
static void
thread_reap_list(kthread_t *t)
{
	kthread_t *next;

	while (t != NULL) {
		next = t->t_forw;
		thread_free(t);
		t = next;
	}
}

/* ARGSUSED */
static void
thread_zone_destroy(zoneid_t zoneid, void *unused)
{
	kthread_t *t, *l;

	mutex_enter(&reaplock);
	/*
	 * Pull threads and lwps associated with zone off deathrow lists.
	 */
	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
	mutex_exit(&reaplock);

	/*
	 * Reap threads
	 */
	thread_reap_list(t);

	/*
	 * Reap lwps
	 */
	thread_reap_list(l);
}

/*
 * cleanup zombie threads that are on deathrow.
 */
void
thread_reaper()
{
	kthread_t *t, *l;
	callb_cpr_t cprinfo;

	/*
	 * Register callback to clean up threads when zone is destroyed.
	 */
	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
	for (;;) {
		mutex_enter(&reaplock);
		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&reaper_cv, &reaplock);
			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
		}
		t = thread_deathrow;
		l = lwp_deathrow;
		thread_deathrow = NULL;
		lwp_deathrow = NULL;
		thread_reapcnt = 0;
		lwp_reapcnt = 0;
		mutex_exit(&reaplock);

		/*
		 * Reap threads
		 */
		thread_reap_list(t);

		/*
		 * Reap lwps
		 */
		thread_reap_list(l);
	}
}

/*
 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is
 * reapable.  This is called from the idle thread so it must not block
 * (just spin).
 */
void
reapq_add(kthread_t *t)
{
	mutex_enter(&reaplock);

	/*
	 * lwp_deathrow contains only threads with lwp linkage
	 * that are of the default stacksize.  Anything else goes
	 * on thread_deathrow.
	 */
	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
		t->t_forw = lwp_deathrow;
		lwp_deathrow = t;
		lwp_reapcnt++;
	} else {
		t->t_forw = thread_deathrow;
		thread_deathrow = t;
		thread_reapcnt++;
	}
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
	t->t_state = TS_FREE;
	lock_clear(&t->t_lock);
	mutex_exit(&reaplock);
}

/*
 * Install thread context ops for the current thread.
 */
void
installctx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx;

	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
	ctx->save_op = save;
	ctx->restore_op = restore;
	ctx->fork_op = fork;
	ctx->lwp_create_op = lwp_create;
	ctx->exit_op = exit;
	ctx->free_op = free;
	ctx->arg = arg;
	ctx->next = t->t_ctx;
	t->t_ctx = ctx;
}
/*
 * Remove the thread context ops from a thread.
 */
int
removectx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx, *prev_ctx;

	/*
	 * The incoming kthread_t (which is the thread for which the
	 * context ops will be removed) should be one of the following:
	 *
	 * a) the current thread,
	 *
	 * b) a thread of a process that's being forked (SIDL),
	 *
	 * c) a thread that belongs to the same process as the current
	 *    thread and for which the current thread is the agent thread,
	 *
	 * d) a thread that is TS_STOPPED which is indicative of it
	 *    being (if curthread is not an agent) a thread being created
	 *    as part of an lwp creation.
	 */
	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	/*
	 * Serialize modifications to t->t_ctx to prevent the agent thread
	 * and the target thread from racing with each other during lwp exit.
	 */
	mutex_enter(&t->t_ctx_lock);
	prev_ctx = NULL;
	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
		if (ctx->save_op == save && ctx->restore_op == restore &&
		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
		    ctx->exit_op == exit && ctx->free_op == free &&
		    ctx->arg == arg) {
			if (prev_ctx)
				prev_ctx->next = ctx->next;
			else
				t->t_ctx = ctx->next;
			mutex_exit(&t->t_ctx_lock);
			if (ctx->free_op != NULL)
				(ctx->free_op)(ctx->arg, 0);
			kmem_free(ctx, sizeof (struct ctxop));
			return (1);
		}
		prev_ctx = ctx;
	}
	mutex_exit(&t->t_ctx_lock);

	return (0);
}

void
savectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->save_op != NULL)
			(ctx->save_op)(ctx->arg);
}

void
restorectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->restore_op != NULL)
			(ctx->restore_op)(ctx->arg);
}
void
exitctx(kthread_t *t)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->exit_op != NULL)
			(ctx->exit_op)(t);
}

/*
 * freectx is called from thread_free() and exec() to get
 * rid of old thread context ops.
 */
void
freectx(kthread_t *t, int isexec)
{
	struct ctxop *ctx;

	while ((ctx = t->t_ctx) != NULL) {
		t->t_ctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, isexec);
		kmem_free(ctx, sizeof (struct ctxop));
	}
}

/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	if (t->t_state == TS_SLEEP) {
		/*
		 * Take off sleep queue.
		 */
		SOBJ_UNSLEEP(t->t_sobj_ops, t);
	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
		/*
		 * Already on dispatcher queue.
		 */
		return;
	} else if (t->t_state == TS_WAIT) {
		waitq_setrun(t);
	} else if (t->t_state == TS_STOPPED) {
		/*
		 * All of the sending of SIGCONT (TC_XSTART) and /proc
		 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
		 * requested that the thread be run.
		 * Just calling setrun() is not sufficient to set a stopped
		 * thread running.  TP_TXSTART is always set if the thread
		 * is not stopped by a jobcontrol stop signal.
		 * TP_TPSTART is always set if /proc is not controlling it.
		 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
		 * The thread won't be stopped unless one of these
		 * three mechanisms did it.
		 *
		 * These flags must be set before calling setrun_locked(t).
		 * They can't be passed as arguments because the streams
		 * code calls setrun() indirectly and the mechanism for
		 * doing so admits only one argument.  Note that the
		 * thread must be locked in order to change t_schedflags.
		 */
		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
			return;
		/*
		 * Process is no longer stopped (a thread is running).
		 */
		t->t_whystop = 0;
		t->t_whatstop = 0;
		/*
		 * Strictly speaking, we do not have to clear these
		 * flags here; they are cleared on entry to stop().
		 * However, they are confusing when doing kernel
		 * debugging or when they are revealed by ps(1).
		 */
		t->t_schedflag &= ~TS_ALLSTART;
		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
		ASSERT(t->t_lockp == &transition_lock);
		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
		/*
		 * Let the class put the process on the dispatcher queue.
		 */
		CL_SETRUN(t);
	}
}

void
setrun(kthread_t *t)
{
	thread_lock(t);
	setrun_locked(t);
	thread_unlock(t);
}
/*
 * Unpin an interrupted thread.
 *	When an interrupt occurs, the interrupt is handled on the stack
 *	of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 *	When swtch() is switching away from an interrupt thread because it
 *	blocked or was preempted, this routine is called to complete the
 *	saving of the interrupted thread state, and returns the interrupted
 *	thread pointer so it may be resumed.
 *
 *	Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
	kthread_t	*t = curthread;	/* current thread */
	kthread_t	*itp;		/* interrupted thread */
	int		i;		/* interrupt level */
	extern int	intr_passivate();

	ASSERT(t->t_intr != NULL);

	itp = t->t_intr;		/* interrupted thread */
	t->t_intr = NULL;		/* clear interrupt ptr */

	/*
	 * Get state from interrupt thread for the one
	 * it interrupted.
	 */

	i = intr_passivate(t, itp);

	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
	    i, t, t, itp, itp);

	/*
	 * Dissociate the current thread from the interrupted thread's LWP.
	 */
	t->t_lwp = NULL;

	/*
	 * Interrupt handlers above the level that spinlocks block must
	 * not block.
	 */
#if DEBUG
	if (i < 0 || i > LOCK_LEVEL)
		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
#endif

	/*
	 * Compute the CPU's base interrupt level based on the active
	 * interrupts.
	 */
	ASSERT(CPU->cpu_intr_actv & (1 << i));
	set_base_spl();

	return (itp);
}

/*
 * Create and initialize an interrupt thread.
 *	Called at spl7() or better.
 */
void
thread_create_intr(struct cpu *cp)
{
	kthread_t *tp;

	tp = thread_create(NULL, 0,
	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

	/*
	 * Set the thread in the TS_FREE state.  The state will change
	 * to TS_ONPROC only while the interrupt is active.  Think of these
	 * as being on a private free list for the CPU.  Being TS_FREE keeps
	 * inactive interrupt threads out of debugger thread lists.
	 *
	 * We cannot call thread_create with TS_FREE because of the current
	 * checks there for ONPROC.  Fix this when thread_create takes flags.
	 */
	THREAD_FREEINTR(tp, cp);

	/*
	 * Nobody should ever reference the credentials of an interrupt
	 * thread so make it NULL to catch any such references.
	 */
	tp->t_cred = NULL;
	tp->t_flag |= T_INTR_THREAD;
	tp->t_cpu = cp;
	tp->t_bound_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;
	tp->t_affinitycnt = 1;
	tp->t_preempt = 1;

	/*
	 * Don't make a user-requested binding on this thread so that
	 * the processor can be offlined.
	 */
	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
	tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
	tp->t_stk -= STACK_ALIGN;
	*(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

	/*
	 * Link onto CPU's interrupt pool.
	 */
	tp->t_link = cp->cpu_intr_thread;
	cp->cpu_intr_thread = tp;
}
/*
 * TSD -- THREAD SPECIFIC DATA
 */
static kmutex_t		tsd_mutex;	/* linked list spin lock */
static uint_t		tsd_nkeys;	/* size of destructor array */
/* per-key destructor funcs */
static void		(**tsd_destructor)(void *);
/* list of tsd_thread's */
static struct tsd_thread	*tsd_list;

/*
 * Default destructor
 *	Needed because NULL destructor means that the key is unused
 */
/* ARGSUSED */
void
tsd_defaultdestructor(void *value)
{}

/*
 * Create a key (index into per thread array)
 *	Locks out tsd_create, tsd_destroy, and tsd_exit
 *	May allocate memory with lock held
 */
void
tsd_create(uint_t *keyp, void (*destructor)(void *))
{
	int	i;
	uint_t	nkeys;

	/*
	 * if key is allocated, do nothing
	 */
	mutex_enter(&tsd_mutex);
	if (*keyp) {
		mutex_exit(&tsd_mutex);
		return;
	}
	/*
	 * find an unused key
	 */
	if (destructor == NULL)
		destructor = tsd_defaultdestructor;

	for (i = 0; i < tsd_nkeys; ++i)
		if (tsd_destructor[i] == NULL)
			break;

	/*
	 * if no unused keys, increase the size of the destructor array
	 */
	if (i == tsd_nkeys) {
		if ((nkeys = (tsd_nkeys << 1)) == 0)
			nkeys = 1;
		tsd_destructor =
		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
		    (size_t)(nkeys * sizeof (void (*)(void *))));
		tsd_nkeys = nkeys;
	}

	/*
	 * allocate the next available unused key
	 */
	tsd_destructor[i] = destructor;
	*keyp = i + 1;
	mutex_exit(&tsd_mutex);
}

/*
 * Destroy a key -- this is for unloadable modules
 *
 * Assumes that the caller is preventing tsd_set and tsd_get
 * Locks out tsd_create, tsd_destroy, and tsd_exit
 * May free memory with lock held
 */
void
tsd_destroy(uint_t *keyp)
{
	uint_t key;
	struct tsd_thread *tsd;

	/*
	 * protect the key namespace and our destructor lists
	 */
	mutex_enter(&tsd_mutex);
	key = *keyp;
	*keyp = 0;

	ASSERT(key <= tsd_nkeys);

	/*
	 * if the key is valid
	 */
	if (key != 0) {
		uint_t k = key - 1;
		/*
		 * for every thread with TSD, call key's destructor
		 */
		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
			/*
			 * no TSD for key in this thread
			 */
			if (key > tsd->ts_nkeys)
				continue;
			/*
			 * call destructor for key
			 */
			if (tsd->ts_value[k] && tsd_destructor[k])
				(*tsd_destructor[k])(tsd->ts_value[k]);
			/*
			 * reset value for key
			 */
			tsd->ts_value[k] = NULL;
		}
		/*
		 * actually free the key (NULL destructor == unused)
		 */
		tsd_destructor[k] = NULL;
	}

	mutex_exit(&tsd_mutex);
}

/*
 * Quickly return the per thread value that was stored with the specified key
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 */
void *
tsd_get(uint_t key)
{
	return (tsd_agent_get(curthread, key));
}

/*
 * Set a per thread value indexed with the specified key
 */
int
tsd_set(uint_t key, void *value)
{
	return (tsd_agent_set(curthread, key, value));
}
/*
 * Like tsd_get(), except that the agent lwp can get the tsd of
 * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or while the syslwp is creating
 * a new lwp.
 */
void *
tsd_agent_get(kthread_t *t, uint_t key)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key && tsd != NULL && key <= tsd->ts_nkeys)
		return (tsd->ts_value[key - 1]);
	return (NULL);
}

/*
 * Like tsd_set(), except that the agent lwp can set the tsd of
 * another thread in the same process, or syslwp can set the tsd
 * of a thread it's in the middle of creating.
 *
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 * May lock out tsd_destroy (and tsd_create), may allocate memory with
 * lock held
 */
int
tsd_agent_set(kthread_t *t, uint_t key, void *value)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key == 0)
		return (EINVAL);
	if (tsd == NULL)
		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key <= tsd->ts_nkeys) {
		tsd->ts_value[key - 1] = value;
		return (0);
	}

	ASSERT(key <= tsd_nkeys);

	/*
	 * lock out tsd_destroy()
	 */
	mutex_enter(&tsd_mutex);
	if (tsd->ts_nkeys == 0) {
		/*
		 * Link onto list of threads with TSD
		 */
		if ((tsd->ts_next = tsd_list) != NULL)
			tsd_list->ts_prev = tsd;
		tsd_list = tsd;
	}

	/*
	 * Allocate thread local storage and set the value for key
	 */
	tsd->ts_value = tsd_realloc(tsd->ts_value,
	    tsd->ts_nkeys * sizeof (void *),
	    key * sizeof (void *));
	tsd->ts_nkeys = key;
	tsd->ts_value[key - 1] = value;
	mutex_exit(&tsd_mutex);

	return (0);
}


/*
 * Return the per thread value that was stored with the specified key
 *	If necessary, create the key and the value
 *	Assumes the caller is protecting *keyp from tsd_destroy
 */
void *
tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
{
	void *value;
	uint_t key = *keyp;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
		return (value);
	if (key == 0)
		tsd_create(keyp, destroy);
	(void) tsd_set(*keyp, value = (*allocate)());

	return (value);
}
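/*
 * tsd_getcreate() is the lazy-initialization form: a caller such as
 * (my_key, my_destructor and my_alloc are hypothetical)
 *
 *	data = tsd_getcreate(&my_key, my_destructor, my_alloc);
 *
 * gets a per-thread structure allocated on first use, with no separate
 * initialization step.
 */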
/*
 * Called from thread_exit() to run the destructor function for each tsd
 *	Locks out tsd_create and tsd_destroy
 *	Assumes that the destructor *DOES NOT* use tsd
 */
void
tsd_exit(void)
{
	int i;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		return;

	if (tsd->ts_nkeys == 0) {
		kmem_free(tsd, sizeof (*tsd));
		curthread->t_tsd = NULL;
		return;
	}

	/*
	 * lock out tsd_create and tsd_destroy, call
	 * the destructor, and mark the value as destroyed.
	 */
	mutex_enter(&tsd_mutex);

	for (i = 0; i < tsd->ts_nkeys; i++) {
		if (tsd->ts_value[i] && tsd_destructor[i])
			(*tsd_destructor[i])(tsd->ts_value[i]);
		tsd->ts_value[i] = NULL;
	}

	/*
	 * remove from linked list of threads with TSD
	 */
	if (tsd->ts_next)
		tsd->ts_next->ts_prev = tsd->ts_prev;
	if (tsd->ts_prev)
		tsd->ts_prev->ts_next = tsd->ts_next;
	if (tsd_list == tsd)
		tsd_list = tsd->ts_next;

	mutex_exit(&tsd_mutex);

	/*
	 * free up the TSD
	 */
	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
	kmem_free(tsd, sizeof (struct tsd_thread));
	curthread->t_tsd = NULL;
}

/*
 * realloc
 */
static void *
tsd_realloc(void *old, size_t osize, size_t nsize)
{
	void *new;

	new = kmem_zalloc(nsize, KM_SLEEP);
	if (old) {
		bcopy(old, new, osize);
		kmem_free(old, osize);
	}
	return (new);
}

/*
 * Check to see if an interrupt thread might be active at a given ipl.
 *	If so return true.
 *	We must be conservative--it is ok to give a false yes, but a false no
 *	will cause disaster.  (But if the situation changes after we check it
 *	is ok--the caller is trying to ensure that an interrupt routine has
 *	been exited).
 *	This is used when trying to remove an interrupt handler from an
 *	autovector list in avintr.c.
 */
int
intr_active(struct cpu *cp, int level)
{
	if (level <= LOCK_LEVEL)
		return (cp->cpu_thread != cp->cpu_dispthread);
	else
		return (CPU_ON_INTR(cp));
}

/*
 * Return non-zero if an interrupt is being serviced.
 */
int
servicing_interrupt()
{
	int onintr = 0;

	/* Are we an interrupt thread */
	if (curthread->t_flag & T_INTR_THREAD)
		return (1);
	/* Are we servicing a high level interrupt? */
	if (CPU_ON_INTR(CPU)) {
		kpreempt_disable();
		onintr = CPU_ON_INTR(CPU);
		kpreempt_enable();
	}
	return (onintr);
}
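/*
 * Note on the double check in servicing_interrupt(): the first
 * CPU_ON_INTR(CPU) test is only a hint, since the thread can migrate
 * between reading CPU and testing it; the answer is stable only while
 * preemption is disabled, hence the recheck under kpreempt_disable().
 */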
/*
 * Change the dispatch priority of a thread in the system.
 *	Used when raising or lowering a thread's priority.
 *	(E.g., priority inheritance)
 *
 *	Since threads are queued according to their priority, we
 *	must check the thread's state to determine whether it
 *	is on a queue somewhere.  If it is, we've got to:
 *
 *		o Dequeue the thread.
 *		o Change its effective priority.
 *		o Enqueue the thread.
 *
 *	Assumptions: The thread whose priority we wish to change
 *	must be locked before we call thread_change_(e)pri().
 *	The thread_change(e)pri() function doesn't drop the thread
 *	lock--that must be done by its caller.
 */
void
thread_change_epri(kthread_t *t, pri_t disp_pri)
{
	uint_t	state;

	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * If the inherited priority hasn't actually changed,
	 * just return.
	 */
	if (t->t_epri == disp_pri)
		return;

	state = t->t_state;

	/*
	 * If it's not on a queue, change the priority with
	 * impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_epri = disp_pri;

		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
		return;
	}

	/*
	 * It's either on a sleep queue or a run queue.
	 */
	if (state == TS_SLEEP) {
		/*
		 * Take the thread out of its sleep queue.
		 * Change the inherited priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * effective priority needs to change.
		 */
		if (disp_pri != t->t_epri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 */
		(void) dispdeq(t);
		t->t_epri = disp_pri;
		setbackdq(t);
	}
}	/* end of thread_change_epri */

/*
 * Function: Change the t_pri field of a thread.
 * Side Effects: Adjust the thread ordering on a run queue
 *		 or sleep queue, if necessary.
 * Returns: 1 if the thread was on a run queue, else 0.
 */
int
thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
{
	uint_t	state;
	int	on_rq = 0;

	ASSERT(THREAD_LOCK_HELD(t));

	state = t->t_state;
	THREAD_WILLCHANGE_PRI(t, disp_pri);

	/*
	 * If it's not on a queue, change the priority with
	 * impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_pri = disp_pri;

		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
		return (0);
	}

	/*
	 * It's either on a sleep queue or a run queue.
	 */
	if (state == TS_SLEEP) {
		/*
		 * If the priority has changed, take the thread out of
		 * its sleep queue and change the priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		if (disp_pri != t->t_pri)
			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * priority needs to change.
		 */
		if (disp_pri != t->t_pri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 *
		 * We still requeue the thread even if the priority
		 * is unchanged to preserve round-robin (and other)
		 * effects between threads of the same priority.
		 */
		on_rq = dispdeq(t);
		ASSERT(on_rq);
		t->t_pri = disp_pri;
		if (front) {
			setfrontdq(t);
		} else {
			setbackdq(t);
		}
	}
	return (on_rq);
}