/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/pcb.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/resource.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/kmem.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/poll.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <c2/audit.h>
#include <sys/tnf.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/brand.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/schedctl.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <sys/kiconv.h>

struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
static kthread_t *allthreads = &t0;	/* circular list of all threads */

static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t	*thread_deathrow;	/* circular list of reapable threads */
kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
kmutex_t	reaplock;		/* protects lwp and thread deathrows */
int	thread_reapcnt = 0;		/* number of threads on deathrow */
int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
int	reaplimit = 16;			/* delay reaping until reaplimit */

thread_free_lock_t	*thread_free_lock;
					/* protects tick thread from reaper */

extern int nthread;

id_t	syscid;				/* system scheduling class ID */
void	*segkp_thread;			/* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;
static kt_did_t next_t_id = 1;

/* Default mode for thread binding to CPUs and processor sets */
int default_binding_mode = TB_ALLHARD;

/*
 * Min/Max stack sizes for stack size parameters
 */
#define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
#define	MIN_STKSIZE	DEFAULTSTKSZ

/*
 * default_stksize overrides lwp_default_stksize if it is set.
 */
int	default_stksize;
int	lwp_default_stksize;

static zone_key_t zone_thread_key;

unsigned int kmem_stackinfo;		/* stackinfo feature on-off */
kmem_stkinfo_t *kmem_stkinfo_log;	/* stackinfo circular log */
static kmutex_t kmem_stkinfo_lock;	/* protects kmem_stkinfo_log */

/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

void thread_reaper(void);

/* forward declarations for stackinfo feature */
static void stkinfo_begin(kthread_t *);
static void stkinfo_end(kthread_t *);
static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);

/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (turnstile_t));
	return (0);
}

/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
	turnstile_t *ts = buf;

	ASSERT(ts->ts_free == NULL);
	ASSERT(ts->ts_waiters == 0);
	ASSERT(ts->ts_inheritor == NULL);
	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}

void
thread_init(void)
{
	kthread_t *tp;
	extern char sys_name[];
	extern void idle();
	struct cpu *cpu = CPU;
	int i;
	kmutex_t *lp;

	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
	thread_free_lock =
	    kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
	for (i = 0; i < THREAD_FREE_NUM; i++) {
		lp = &thread_free_lock[i].tf_lock;
		mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
	}

#if defined(__i386) || defined(__amd64)
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * "struct _klwp" includes a "struct pcb", which includes a
	 * "struct fpu", which needs to be 16-byte aligned on amd64
	 * (and even on i386 for fxsave/fxrstor).
	 */
	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    16, NULL, NULL, NULL, NULL, NULL, 0);
#else
	/*
	 * Allocate thread structures from static_arena.  This prevents
	 * issues where a thread tries to relocate its own thread
	 * structure and touches it after the mapping has been suspended.
	 */
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

	lwp_stk_cache_init();

	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

	turnstile_cache = kmem_cache_create("turnstile_cache",
	    sizeof (turnstile_t), 0,
	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);

	label_init();
	cred_init();

	/*
	 * Initialize various resource management facilities.
	 */
	rctl_init();
	cpucaps_init();
	/*
	 * zone_init() should be called before project_init() so that the
	 * project ID for the first project is initialized correctly.
	 */
	zone_init();
	project_init();
	brand_init();
	kiconv_init();
	task_init();
	tcache_init();
	pool_init();

	curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	/*
	 * Originally, we had two parameters to set default stack
	 * size: one for lwp's (lwp_default_stksize), and one for
	 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
	 * Now we have a third parameter that overrides both if it is
	 * set to a legal stack size, called default_stksize.
	 */

	if (default_stksize == 0) {
		default_stksize = DEFAULTSTKSZ;
	} else if (default_stksize % PAGESIZE != 0 ||
	    default_stksize > MAX_STKSIZE ||
	    default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    (int)DEFAULTSTKSZ);
		default_stksize = DEFAULTSTKSZ;
	} else {
		lwp_default_stksize = default_stksize;
	}

	if (lwp_default_stksize == 0) {
		lwp_default_stksize = default_stksize;
	} else if (lwp_default_stksize % PAGESIZE != 0 ||
	    lwp_default_stksize > MAX_STKSIZE ||
	    lwp_default_stksize < MIN_STKSIZE) {
		cmn_err(CE_WARN, "Illegal stack size. Using %d",
		    default_stksize);
		lwp_default_stksize = default_stksize;
	}

	segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
	    lwp_default_stksize,
	    (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));

	segkp_thread = segkp_cache_init(segkp, t_cache_sz,
	    default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);

	(void) getcid(sys_name, &syscid);
	curthread->t_cid = syscid;	/* current thread is t0 */

	/*
	 * Set up the first CPU's idle thread.
	 * It runs whenever the CPU has nothing worthwhile to do.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
	cpu->cpu_idle_thread = tp;
	tp->t_preempt = 1;
	tp->t_disp_queue = cpu->cpu_disp;
	ASSERT(tp->t_disp_queue != NULL);
	tp->t_bound_cpu = cpu;
	tp->t_affinitycnt = 1;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread.  In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while it registers itself.  It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	/*
	 * Create the thread_reaper daemon.  From this point on, exited
	 * threads will get reaped.
	 */
	(void) thread_create(NULL, 0, (void (*)())thread_reaper,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Finish initializing the kernel memory allocator now that
	 * thread_create() is available.
	 */
	kmem_thread_init();

	if (boothowto & RB_DEBUG)
		kdi_dvec_thravail();
}

/*
 * Create a thread.
 *
 * thread_create() blocks for memory if necessary.  It never fails.
 *
 * If stk is NULL, the thread is created at the base of the stack
 * and cannot be swapped.
 */
kthread_t *
thread_create(
	caddr_t	stk,
	size_t	stksize,
	void	(*proc)(),
	void	*arg,
	size_t	len,
	proc_t	*pp,
	int	state,
	pri_t	pri)
{
	kthread_t *t;
	extern struct classfuncs sys_classfuncs;
	turnstile_t *ts;

	/*
	 * Every thread keeps a turnstile around in case it needs to block.
	 * The only reason the turnstile is not simply part of the thread
	 * structure is that we may have to break the association whenever
	 * more than one thread blocks on a given synchronization object.
	 * From a memory-management standpoint, turnstiles are like the
	 * "attached mblks" that hang off dblks in the streams allocator.
	 */
	ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

	if (stk == NULL) {
		/*
		 * alloc both thread and stack in segkp chunk
		 */

		if (stksize < default_stksize)
			stksize = default_stksize;

		if (stksize == default_stksize) {
			stk = (caddr_t)segkp_cache_get(segkp_thread);
		} else {
			stksize = roundup(stksize, PAGESIZE);
			stk = (caddr_t)segkp_get(segkp, stksize,
			    (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
		}

		ASSERT(stk != NULL);

		/*
		 * The machine-dependent mutex code may require that
		 * thread pointers (since they may be used for mutex owner
		 * fields) have certain alignment requirements.
		 * PTR24_ALIGN is the size of the alignment quanta.
		 * XXX - assumes stack grows toward low addresses.
		 */
		if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
			cmn_err(CE_PANIC, "thread_create: proposed stack size"
			    " too small to hold thread.");
#ifdef STACK_GROWTH_DOWN
		stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
		stksize &= -PTR24_ALIGN;	/* make thread aligned */
		t = (kthread_t *)(stk + stksize);
		bzero(t, sizeof (kthread_t));
		if (audit_active)
			audit_thread_create(t);
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else	/* stack grows to larger addresses */
		stksize -= SA(sizeof (kthread_t));
		t = (kthread_t *)(stk);
		bzero(t, sizeof (kthread_t));
		t->t_stk = stk + sizeof (kthread_t);
		t->t_stkbase = stk + stksize + sizeof (kthread_t);
#endif	/* STACK_GROWTH_DOWN */
		t->t_flag |= T_TALLOCSTK;
		t->t_swap = stk;
	} else {
		t = kmem_cache_alloc(thread_cache, KM_SLEEP);
		bzero(t, sizeof (kthread_t));
		ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
		if (audit_active)
			audit_thread_create(t);
		/*
		 * Initialize t_stk to the kernel stack pointer to use
		 * upon entry to the kernel
		 */
#ifdef STACK_GROWTH_DOWN
		t->t_stk = stk + stksize;
		t->t_stkbase = stk;
#else
		t->t_stk = stk;			/* 3b2-like */
		t->t_stkbase = stk + stksize;
#endif	/* STACK_GROWTH_DOWN */
	}

	if (kmem_stackinfo != 0) {
		stkinfo_begin(t);
	}

	/* set default stack flag */
	if (stksize == lwp_default_stksize)
		t->t_flag |= T_DFLTSTK;

	t->t_ts = ts;

	/*
	 * p_cred could be NULL if thread_create() is called before
	 * cred_init() is called in main().
	 */
	mutex_enter(&pp->p_crlock);
	if (pp->p_cred)
		crhold(t->t_cred = pp->p_cred);
	mutex_exit(&pp->p_crlock);
	t->t_start = gethrestime_sec();
	t->t_startpc = proc;
	t->t_procp = pp;
	t->t_clfuncs = &sys_classfuncs.thread;
	t->t_cid = syscid;
	t->t_pri = pri;
	t->t_stime = ddi_get_lbolt();
	t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t->t_bind_cpu = PBIND_NONE;
	t->t_bindflag = (uchar_t)default_binding_mode;
	t->t_bind_pset = PS_NONE;
	t->t_plockp = &pp->p_lock;
	t->t_copyops = NULL;
	t->t_taskq = NULL;
	t->t_anttime = 0;
	t->t_hatdepth = 0;

	t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */

	CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
#ifndef NPROBE
	/* Kernel probe */
	tnf_thread_create(t);
#endif /* NPROBE */
	LOCK_INIT_CLEAR(&t->t_lock);

	/*
	 * Callers who give us a NULL proc must do their own
	 * stack initialization.  e.g. lwp_create()
	 */
	if (proc != NULL) {
		t->t_stk = thread_stk_init(t->t_stk);
		thread_load(t, proc, arg, len);
	}

	/*
	 * Put a hold on project0.  If this thread is actually in a
	 * different project, then t_proj will be changed later in
	 * lwp_create().  All kernel-only threads must be in project 0.
	 */
	t->t_proj = project_hold(proj0p);

	lgrp_affinity_init(&t->t_lgrp_affinity);

	mutex_enter(&pidlock);
	nthread++;
	t->t_did = next_t_id++;
	t->t_prev = curthread->t_prev;
	t->t_next = curthread;

	/*
	 * Add the thread to the list of all threads, and initialize
	 * its t_cpu pointer.  We need to block preemption since
	 * cpu_offline walks the thread list looking for threads
	 * with t_cpu pointing to the CPU being offlined.  We want
	 * to make sure that the list is consistent and that if t_cpu
	 * is set, the thread is on the list.
	 */
	kpreempt_disable();
	curthread->t_prev->t_next = t;
	curthread->t_prev = t;

	/*
	 * Threads should never have a NULL t_cpu pointer so assign it
	 * here.  If the thread is being created with state TS_RUN a
	 * better CPU may be chosen when it is placed on the run queue.
	 *
	 * We need to keep kernel preemption disabled when setting all
	 * three fields to keep them in sync.  Also, always create in
	 * the default partition since that's where kernel threads go
	 * (if this isn't a kernel thread, t_cpupart will be changed
	 * in lwp_create before setting the thread runnable).
	 */
	t->t_cpupart = &cp_default;

	/*
	 * For now, affiliate this thread with the root lgroup.
	 * Since the kernel does not (presently) allocate its memory
	 * in a locality aware fashion, the root is an appropriate home.
	 * If this thread is later associated with an lwp, it will have
	 * its lgroup re-assigned at that time.
	 */
	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

	/*
	 * Inherit the current cpu.  If this cpu isn't part of the chosen
	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
	 * is ready to run.
	 */
	if (CPU->cpu_part == &cp_default)
		t->t_cpu = CPU;
	else
		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
		    t->t_pri, NULL);

	t->t_disp_queue = t->t_cpu->cpu_disp;
	kpreempt_enable();

	/*
	 * Initialize thread state and the dispatcher lock pointer.
	 * Need to hold onto pidlock to block allthreads walkers until
	 * the state is set.
	 */
	switch (state) {
	case TS_RUN:
		curthread->t_oldspl = splhigh();	/* get dispatcher spl */
		THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
		CL_SETRUN(t);
		thread_unlock(t);
		break;

	case TS_ONPROC:
		THREAD_ONPROC(t, t->t_cpu);
		break;

	case TS_FREE:
		/*
		 * Free state will be used for intr threads.
		 * The interrupt routine must set the thread dispatcher
		 * lock pointer (t_lockp) if starting on a CPU
		 * other than the current one.
		 */
		THREAD_FREEINTR(t, CPU);
		break;

	case TS_STOPPED:
		THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
		break;

	default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
		cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
	}
	mutex_exit(&pidlock);
	return (t);
}

/*
 * Move thread to project0 and take care of project reference counters.
 */
void
thread_rele(kthread_t *t)
{
	kproject_t *kpj;

	thread_lock(t);

	ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
	kpj = ttoproj(t);
	t->t_proj = proj0p;

	thread_unlock(t);

	if (kpj != proj0p) {
		project_rele(kpj);
		(void) project_hold(proj0p);
	}
}

void
thread_exit(void)
{
	kthread_t *t = curthread;

	if ((t->t_proc_flag & TP_ZTHREAD) != 0)
		cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");

	tsd_exit();		/* Clean up this thread's TSD */

	kcpc_passivate();	/* clean up performance counter state */

	/*
	 * No kernel thread should have called poll() without arranging
	 * for pollcleanup() to be called here.
	 */
	ASSERT(t->t_pollstate == NULL);
	ASSERT(t->t_schedctl == NULL);
	if (t->t_door)
		door_slam();	/* in case thread did an upcall */

#ifndef NPROBE
	/* Kernel probe */
	if (t->t_tnf_tpdp)
		tnf_thread_exit();
#endif /* NPROBE */

	thread_rele(t);
	t->t_preempt++;

	/*
	 * remove thread from the all threads list so that
	 * death-row can use the same pointers.
	 */
	mutex_enter(&pidlock);
	t->t_next->t_prev = t->t_prev;
	t->t_prev->t_next = t->t_next;
	ASSERT(allthreads != t);	/* t0 never exits */
	cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
	mutex_exit(&pidlock);

	if (t->t_ctx != NULL)
		exitctx(t);
	if (t->t_procp->p_pctx != NULL)
		exitpctx(t->t_procp);

	if (kmem_stackinfo != 0) {
		stkinfo_end(t);
	}

	t->t_state = TS_ZOMB;	/* set zombie thread */

	swtch_from_zombie();	/* give up the CPU */
	/* NOTREACHED */
}

/*
 * Check to see if the specified thread is active (defined as being on
 * the thread list).  This is certainly a slow way to do this; if there's
 * ever a reason to speed it up, we could maintain a hash table of active
 * threads indexed by their t_did.
 */
static kthread_t *
did_to_thread(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(MUTEX_HELD(&pidlock));
	for (t = curthread->t_next; t != curthread; t = t->t_next) {
		if (t->t_did == tid)
			break;
	}
	if (t->t_did == tid)
		return (t);
	else
		return (NULL);
}

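/*
 * Illustrative sketch (editor's addition, not part of the original code):
 * a kernel consumer that needs to synchronize with a system thread's exit
 * typically creates it with thread_create(), records its t_did, and waits
 * for it with thread_join().  The worker function and argument below are
 * hypothetical.
 *
 *	kthread_t *t;
 *	kt_did_t did;
 *
 *	t = thread_create(NULL, 0, worker, arg, 0, &p0, TS_RUN, minclsyspri);
 *	did = t->t_did;
 *	...
 *	thread_join(did);	(returns once worker has called thread_exit())
 */
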
/*
 * Wait for specified thread to exit.  Returns immediately if the thread
 * could not be found, meaning that it has either already exited or never
 * existed.
 */
void
thread_join(kt_did_t tid)
{
	kthread_t *t;

	ASSERT(tid != curthread->t_did);
	ASSERT(tid != t0.t_did);

	mutex_enter(&pidlock);
	/*
	 * Make sure we check that the thread is on the thread list
	 * before blocking on it; otherwise we could end up blocking on
	 * a cv that's already been freed.  In other words, don't cache
	 * the thread pointer across calls to cv_wait.
	 *
	 * The choice of loop invariant means that whenever a thread
	 * is taken off the allthreads list, a cv_broadcast must be
	 * performed on that thread's t_joincv to wake up any waiters.
	 * The broadcast doesn't have to happen right away, but it
	 * shouldn't be postponed indefinitely (e.g., by doing it in
	 * thread_free which may only be executed when the deathrow
	 * queue is processed).
	 */
	while (t = did_to_thread(tid))
		cv_wait(&t->t_joincv, &pidlock);
	mutex_exit(&pidlock);
}

void
thread_free_prevent(kthread_t *t)
{
	kmutex_t *lp;

	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
	mutex_enter(lp);
}

void
thread_free_allow(kthread_t *t)
{
	kmutex_t *lp;

	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
	mutex_exit(lp);
}

static void
thread_free_barrier(kthread_t *t)
{
	kmutex_t *lp;

	lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
	mutex_enter(lp);
	mutex_exit(lp);
}

void
thread_free(kthread_t *t)
{
	ASSERT(t != &t0 && t->t_state == TS_FREE);
	ASSERT(t->t_door == NULL);
	ASSERT(t->t_schedctl == NULL);
	ASSERT(t->t_pollstate == NULL);

	t->t_pri = 0;
	t->t_pc = 0;
	t->t_sp = 0;
	t->t_wchan0 = NULL;
	t->t_wchan = NULL;
	if (t->t_cred != NULL) {
		crfree(t->t_cred);
		t->t_cred = 0;
	}
	if (t->t_pdmsg) {
		kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
		t->t_pdmsg = NULL;
	}
	if (audit_active)
		audit_thread_free(t);
#ifndef NPROBE
	if (t->t_tnf_tpdp)
		tnf_thread_free(t);
#endif /* NPROBE */
	if (t->t_cldata) {
		CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
	}
	if (t->t_rprof != NULL) {
		kmem_free(t->t_rprof, sizeof (*t->t_rprof));
		t->t_rprof = NULL;
	}
	t->t_lockp = NULL;	/* nothing should try to lock this thread now */
	if (t->t_lwp)
		lwp_freeregs(t->t_lwp, 0);
	if (t->t_ctx)
		freectx(t, 0);
	t->t_stk = NULL;
	if (t->t_lwp)
		lwp_stk_fini(t->t_lwp);
	lock_clear(&t->t_lock);

	if (t->t_ts->ts_waiters > 0)
		panic("thread_free: turnstile still active");

	kmem_cache_free(turnstile_cache, t->t_ts);

	free_afd(&t->t_activefd);

	/*
	 * Barrier for the tick accounting code.  The tick accounting code
	 * holds this lock to keep the thread from going away while it's
	 * looking at it.
	 */
	thread_free_barrier(t);

	ASSERT(ttoproj(t) == proj0p);
	project_rele(ttoproj(t));

	lgrp_affinity_free(&t->t_lgrp_affinity);

	/*
	 * Free thread struct and its stack.
	 */
	if (t->t_flag & T_TALLOCSTK) {
		/* thread struct is embedded in stack */
		segkp_release(segkp, t->t_swap);
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
	} else {
		if (t->t_swap) {
			segkp_release(segkp, t->t_swap);
			t->t_swap = NULL;
		}
		if (t->t_lwp) {
			kmem_cache_free(lwp_cache, t->t_lwp);
			t->t_lwp = NULL;
		}
		mutex_enter(&pidlock);
		nthread--;
		mutex_exit(&pidlock);
		kmem_cache_free(thread_cache, t);
	}
}

/*
 * Removes threads associated with the given zone from a deathrow queue.
 * tp is a pointer to the head of the deathrow queue, and countp is a
 * pointer to the current deathrow count.  Returns a linked list of
 * threads removed from the list.
 */
static kthread_t *
thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
{
	kthread_t *tmp, *list = NULL;
	cred_t *cr;

	ASSERT(MUTEX_HELD(&reaplock));
	while (*tp != NULL) {
		if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
			tmp = *tp;
			*tp = tmp->t_forw;
			tmp->t_forw = list;
			list = tmp;
			(*countp)--;
		} else {
			tp = &(*tp)->t_forw;
		}
	}
	return (list);
}

static void
thread_reap_list(kthread_t *t)
{
	kthread_t *next;

	while (t != NULL) {
		next = t->t_forw;
		thread_free(t);
		t = next;
	}
}

/* ARGSUSED */
static void
thread_zone_destroy(zoneid_t zoneid, void *unused)
{
	kthread_t *t, *l;

	mutex_enter(&reaplock);
	/*
	 * Pull threads and lwps associated with zone off deathrow lists.
	 */
	t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
	l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
	mutex_exit(&reaplock);

	/*
	 * Guard against race condition in mutex_owner_running:
	 *	thread=owner(mutex)
	 *	<interrupt>
	 *			thread exits mutex
	 *			thread exits
	 *			thread reaped
	 *			thread struct freed
	 *	cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
	 * A cross call to all cpus will cause the interrupt handler
	 * to reset the PC if it is in mutex_owner_running, refreshing
	 * stale thread pointers.
	 */
	mutex_sync();	/* sync with mutex code */

	/*
	 * Reap threads
	 */
	thread_reap_list(t);

	/*
	 * Reap lwps
	 */
	thread_reap_list(l);
}

/*
 * cleanup zombie threads that are on deathrow.
 */
void
thread_reaper()
{
	kthread_t *t, *l;
	callb_cpr_t cprinfo;

	/*
	 * Register callback to clean up threads when zone is destroyed.
	 */
	zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

	CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
	for (;;) {
		mutex_enter(&reaplock);
		while (thread_deathrow == NULL && lwp_deathrow == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&reaper_cv, &reaplock);
			CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
		}
		/*
		 * mutex_sync() needs to be called when reaping, but
		 * not too often.  We limit reaping rate to once
		 * per second.  Reaplimit is max rate at which threads can
		 * be freed.  Does not impact thread destruction/creation.
		 */
		t = thread_deathrow;
		l = lwp_deathrow;
		thread_deathrow = NULL;
		lwp_deathrow = NULL;
		thread_reapcnt = 0;
		lwp_reapcnt = 0;
		mutex_exit(&reaplock);

		/*
		 * Guard against race condition in mutex_owner_running:
		 *	thread=owner(mutex)
		 *	<interrupt>
		 *			thread exits mutex
		 *			thread exits
		 *			thread reaped
		 *			thread struct freed
		 *	cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
		 * A cross call to all cpus will cause the interrupt handler
		 * to reset the PC if it is in mutex_owner_running, refreshing
		 * stale thread pointers.
		 */
		mutex_sync();	/* sync with mutex code */
		/*
		 * Reap threads
		 */
		thread_reap_list(t);

		/*
		 * Reap lwps
		 */
		thread_reap_list(l);
		delay(hz);
	}
}

/*
 * This is called by lwp_create(), etc., to put an lwp_deathrow thread onto
 * thread_deathrow.  The thread's state has already been changed to TS_FREE
 * to indicate that it is reapable.  The caller already holds the reaplock,
 * and the thread was already freed.
 */
void
reapq_move_lq_to_tq(kthread_t *t)
{
	ASSERT(t->t_state == TS_FREE);
	ASSERT(MUTEX_HELD(&reaplock));
	t->t_forw = thread_deathrow;
	thread_deathrow = t;
	thread_reapcnt++;
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
}

/*
 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is reapable.
 * This is called from the idle thread so it must not block - just spin.
 */
void
reapq_add(kthread_t *t)
{
	mutex_enter(&reaplock);

	/*
	 * lwp_deathrow contains only threads with lwp linkage
	 * that are of the default stacksize.  Anything else goes
	 * on thread_deathrow.
	 */
	if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
		t->t_forw = lwp_deathrow;
		lwp_deathrow = t;
		lwp_reapcnt++;
	} else {
		t->t_forw = thread_deathrow;
		thread_deathrow = t;
		thread_reapcnt++;
	}
	if (lwp_reapcnt + thread_reapcnt > reaplimit)
		cv_signal(&reaper_cv);	/* wake the reaper */
	t->t_state = TS_FREE;
	lock_clear(&t->t_lock);

	/*
	 * Before we return, we need to grab and drop the thread lock for
	 * the dead thread.  At this point, the current thread is the idle
	 * thread, and the dead thread's CPU lock points to the current
	 * CPU -- and we must grab and drop the lock to synchronize with
	 * a racing thread walking a blocking chain that the zombie thread
	 * was recently in.  By this point, that blocking chain is (by
	 * definition) stale:  the dead thread is not holding any locks, and
	 * is therefore not in any blocking chains -- but if we do not regrab
	 * our lock before freeing the dead thread's data structures, the
	 * thread walking the (stale) blocking chain will die on memory
	 * corruption when it attempts to drop the dead thread's lock.  We
	 * only need do this once because there is no way for the dead thread
	 * to ever again be on a blocking chain:  once we have grabbed and
	 * dropped the thread lock, we are guaranteed that anyone that could
	 * have seen this thread in a blocking chain can no longer see it.
	 */
	thread_lock(t);
	thread_unlock(t);

	mutex_exit(&reaplock);
}

/*
 * Install thread context ops for the current thread.
 */
void
installctx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx;

	ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
	ctx->save_op = save;
	ctx->restore_op = restore;
	ctx->fork_op = fork;
	ctx->lwp_create_op = lwp_create;
	ctx->exit_op = exit;
	ctx->free_op = free;
	ctx->arg = arg;
	ctx->next = t->t_ctx;
	t->t_ctx = ctx;
}

/*
 * Remove the thread context ops from a thread.
 */
int
removectx(
	kthread_t *t,
	void	*arg,
	void	(*save)(void *),
	void	(*restore)(void *),
	void	(*fork)(void *, void *),
	void	(*lwp_create)(void *, void *),
	void	(*exit)(void *),
	void	(*free)(void *, int))
{
	struct ctxop *ctx, *prev_ctx;

	/*
	 * The incoming kthread_t (which is the thread for which the
	 * context ops will be removed) should be one of the following:
	 *
	 * a) the current thread,
	 *
	 * b) a thread of a process that's being forked (SIDL),
	 *
	 * c) a thread that belongs to the same process as the current
	 *    thread and for which the current thread is the agent thread,
	 *
	 * d) a thread that is TS_STOPPED which is indicative of it
	 *    being (if curthread is not an agent) a thread being created
	 *    as part of an lwp creation.
	 */
	ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	/*
	 * Serialize modifications to t->t_ctx to prevent the agent thread
	 * and the target thread from racing with each other during lwp exit.
	 */
	mutex_enter(&t->t_ctx_lock);
	prev_ctx = NULL;
	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
		if (ctx->save_op == save && ctx->restore_op == restore &&
		    ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
		    ctx->exit_op == exit && ctx->free_op == free &&
		    ctx->arg == arg) {
			if (prev_ctx)
				prev_ctx->next = ctx->next;
			else
				t->t_ctx = ctx->next;
			mutex_exit(&t->t_ctx_lock);
			if (ctx->free_op != NULL)
				(ctx->free_op)(ctx->arg, 0);
			kmem_free(ctx, sizeof (struct ctxop));
			return (1);
		}
		prev_ctx = ctx;
	}
	mutex_exit(&t->t_ctx_lock);

	return (0);
}

void
savectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->save_op != NULL)
			(ctx->save_op)(ctx->arg);
}

void
restorectx(kthread_t *t)
{
	struct ctxop *ctx;

	ASSERT(t == curthread);
	for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
		if (ctx->restore_op != NULL)
			(ctx->restore_op)(ctx->arg);
}

void
forkctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->fork_op != NULL)
			(ctx->fork_op)(t, ct);
}

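/*
 * Illustrative sketch (editor's addition, not part of the original code):
 * a subsystem that keeps per-thread state (hardware registers, buffers,
 * etc.) can hang it off a thread with installctx() and remove it with a
 * matching removectx() call.  The xx_* names below are hypothetical.
 *
 *	installctx(curthread, xx_state,
 *	    xx_save, xx_restore,	(run on context switch off/onto CPU)
 *	    xx_fork, xx_lwp_create,	(propagate state to new lwps)
 *	    xx_exit, xx_free);		(cleanup at exit and at free time)
 *	...
 *	(void) removectx(curthread, xx_state,
 *	    xx_save, xx_restore, xx_fork, xx_lwp_create, xx_exit, xx_free);
 *
 * removectx() must be passed the same argument and operation vector that
 * were given to installctx(), since the pair identifies the ctxop entry.
 */
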
/*
 * Note that this operator is only invoked via the _lwp_create
 * system call.  The system may have other reasons to create lwps
 * e.g. the agent lwp or the doors unreferenced lwp.
 */
void
lwp_createctx(kthread_t *t, kthread_t *ct)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->lwp_create_op != NULL)
			(ctx->lwp_create_op)(t, ct);
}

/*
 * exitctx is called from thread_exit() and lwp_exit() to perform any actions
 * needed when the thread/LWP leaves the processor for the last time.  This
 * routine is not intended to deal with freeing memory; freectx() is used for
 * that purpose during thread_free().  This routine is provided to allow for
 * clean-up that can't wait until thread_free().
 */
void
exitctx(kthread_t *t)
{
	struct ctxop *ctx;

	for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
		if (ctx->exit_op != NULL)
			(ctx->exit_op)(t);
}

/*
 * freectx is called from thread_free() and exec() to get
 * rid of old thread context ops.
 */
void
freectx(kthread_t *t, int isexec)
{
	struct ctxop *ctx;

	while ((ctx = t->t_ctx) != NULL) {
		t->t_ctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, isexec);
		kmem_free(ctx, sizeof (struct ctxop));
	}
}

/*
 * freectx_ctx is called from lwp_create() when an lwp is reused from
 * lwp_deathrow and its thread structure is added to thread_deathrow.
 * The thread structure to which this ctx was attached may already have
 * been freed by the thread reaper, so free_op implementations shouldn't
 * rely on that thread structure still being around.
 */
void
freectx_ctx(struct ctxop *ctx)
{
	struct ctxop *nctx;

	ASSERT(ctx != NULL);

	do {
		nctx = ctx->next;
		if (ctx->free_op != NULL)
			(ctx->free_op)(ctx->arg, 0);
		kmem_free(ctx, sizeof (struct ctxop));
	} while ((ctx = nctx) != NULL);
}

/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
	ASSERT(THREAD_LOCK_HELD(t));
	if (t->t_state == TS_SLEEP) {
		/*
		 * Take off sleep queue.
		 */
		SOBJ_UNSLEEP(t->t_sobj_ops, t);
	} else if (t->t_state & (TS_RUN | TS_ONPROC)) {
		/*
		 * Already on dispatcher queue.
		 */
		return;
	} else if (t->t_state == TS_WAIT) {
		waitq_setrun(t);
	} else if (t->t_state == TS_STOPPED) {
		/*
		 * All of the sending of SIGCONT (TC_XSTART) and /proc
		 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
		 * requested that the thread be run.
		 * Just calling setrun() is not sufficient to set a stopped
		 * thread running.  TP_TXSTART is always set if the thread
		 * is not stopped by a jobcontrol stop signal.
		 * TP_TPSTART is always set if /proc is not controlling it.
		 * TP_TCSTART is always set if lwp_suspend() didn't stop it.
		 * The thread won't be stopped unless one of these
		 * three mechanisms did it.
		 *
		 * These flags must be set before calling setrun_locked(t).
		 * They can't be passed as arguments because the streams
		 * code calls setrun() indirectly and the mechanism for
		 * doing so admits only one argument.  Note that the
		 * thread must be locked in order to change t_schedflags.
		 */
		if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
			return;
		/*
		 * Process is no longer stopped (a thread is running).
		 */
		t->t_whystop = 0;
		t->t_whatstop = 0;
		/*
		 * Strictly speaking, we do not have to clear these
		 * flags here; they are cleared on entry to stop().
		 * However, they are confusing when doing kernel
		 * debugging or when they are revealed by ps(1).
		 */
		t->t_schedflag &= ~TS_ALLSTART;
		THREAD_TRANSITION(t);	/* drop stopped-thread lock */
		ASSERT(t->t_lockp == &transition_lock);
		ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
		/*
		 * Let the class put the process on the dispatcher queue.
		 */
		CL_SETRUN(t);
	}
}

void
setrun(kthread_t *t)
{
	thread_lock(t);
	setrun_locked(t);
	thread_unlock(t);
}

/*
 * Unpin an interrupted thread.
 * When an interrupt occurs, the interrupt is handled on the stack
 * of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 * When swtch() is switching away from an interrupt thread because it
 * blocked or was preempted, this routine is called to complete the
 * saving of the interrupted thread state, and returns the interrupted
 * thread pointer so it may be resumed.
 *
 * Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
	kthread_t	*t = curthread;	/* current thread */
	kthread_t	*itp;		/* interrupted thread */
	int		i;		/* interrupt level */
	extern int	intr_passivate();

	ASSERT(t->t_intr != NULL);

	itp = t->t_intr;		/* interrupted thread */
	t->t_intr = NULL;		/* clear interrupt ptr */

	/*
	 * Get state from interrupt thread for the one
	 * it interrupted.
	 */

	i = intr_passivate(t, itp);

	TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
	    "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
	    i, t, t, itp, itp);

	/*
	 * Dissociate the current thread from the interrupted thread's LWP.
	 */
	t->t_lwp = NULL;

	/*
	 * Interrupt handlers above the level that spinlocks block must
	 * not block.
	 */
#if DEBUG
	if (i < 0 || i > LOCK_LEVEL)
		cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
#endif

	/*
	 * Compute the CPU's base interrupt level based on the active
	 * interrupts.
	 */
	ASSERT(CPU->cpu_intr_actv & (1 << i));
	set_base_spl();

	return (itp);
}

/*
 * Create and initialize an interrupt thread.
 *	Returns non-zero on error.
 *	Called at spl7() or better.
 */
void
thread_create_intr(struct cpu *cp)
{
	kthread_t *tp;

	tp = thread_create(NULL, 0,
	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

	/*
	 * Set the thread in the TS_FREE state.  The state will change
	 * to TS_ONPROC only while the interrupt is active.  Think of these
	 * as being on a private free list for the CPU.  Being TS_FREE keeps
	 * inactive interrupt threads out of debugger thread lists.
	 *
	 * We cannot call thread_create with TS_FREE because of the current
	 * checks there for ONPROC.  Fix this when thread_create takes flags.
	 */
	THREAD_FREEINTR(tp, cp);

	/*
	 * Nobody should ever reference the credentials of an interrupt
	 * thread so make it NULL to catch any such references.
	 */
	tp->t_cred = NULL;
	tp->t_flag |= T_INTR_THREAD;
	tp->t_cpu = cp;
	tp->t_bound_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;
	tp->t_affinitycnt = 1;
	tp->t_preempt = 1;

	/*
	 * Don't make a user-requested binding on this thread so that
	 * the processor can be offlined.
	 */
	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
	tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
	tp->t_stk -= STACK_ALIGN;
	*(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

	/*
	 * Link onto CPU's interrupt pool.
	 */
	tp->t_link = cp->cpu_intr_thread;
	cp->cpu_intr_thread = tp;
}

/*
 * TSD -- THREAD SPECIFIC DATA
 */
static kmutex_t		tsd_mutex;	 /* linked list spin lock */
static uint_t		tsd_nkeys;	 /* size of destructor array */
/* per-key destructor funcs */
static void		(**tsd_destructor)(void *);
/* list of tsd_thread's */
static struct tsd_thread	*tsd_list;

/*
 * Default destructor
 *	Needed because NULL destructor means that the key is unused
 */
/* ARGSUSED */
void
tsd_defaultdestructor(void *value)
{}

/*
 * Create a key (index into per thread array)
 *	Locks out tsd_create, tsd_destroy, and tsd_exit
 *	May allocate memory with lock held
 */
void
tsd_create(uint_t *keyp, void (*destructor)(void *))
{
	int	i;
	uint_t	nkeys;

	/*
	 * if key is allocated, do nothing
	 */
	mutex_enter(&tsd_mutex);
	if (*keyp) {
		mutex_exit(&tsd_mutex);
		return;
	}
	/*
	 * find an unused key
	 */
	if (destructor == NULL)
		destructor = tsd_defaultdestructor;

	for (i = 0; i < tsd_nkeys; ++i)
		if (tsd_destructor[i] == NULL)
			break;

	/*
	 * if no unused keys, increase the size of the destructor array
	 */
	if (i == tsd_nkeys) {
		if ((nkeys = (tsd_nkeys << 1)) == 0)
			nkeys = 1;
		tsd_destructor =
		    (void (**)(void *))tsd_realloc((void *)tsd_destructor,
		    (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
		    (size_t)(nkeys * sizeof (void (*)(void *))));
		tsd_nkeys = nkeys;
	}

	/*
	 * allocate the next available unused key
	 */
	tsd_destructor[i] = destructor;
	*keyp = i + 1;
	mutex_exit(&tsd_mutex);
}

/*
 * Destroy a key -- this is for unloadable modules
 *
 * Assumes that the caller is preventing tsd_set and tsd_get
 * Locks out tsd_create, tsd_destroy, and tsd_exit
 * May free memory with lock held
 */
void
tsd_destroy(uint_t *keyp)
{
	uint_t key;
	struct tsd_thread *tsd;

	/*
	 * protect the key namespace and our destructor lists
	 */
	mutex_enter(&tsd_mutex);
	key = *keyp;
	*keyp = 0;

	ASSERT(key <= tsd_nkeys);

	/*
	 * if the key is valid
	 */
	if (key != 0) {
		uint_t k = key - 1;
		/*
		 * for every thread with TSD, call key's destructor
		 */
		for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
			/*
			 * no TSD for key in this thread
			 */
			if (key > tsd->ts_nkeys)
				continue;
			/*
			 * call destructor for key
			 */
			if (tsd->ts_value[k] && tsd_destructor[k])
				(*tsd_destructor[k])(tsd->ts_value[k]);
			/*
			 * reset value for key
			 */
			tsd->ts_value[k] = NULL;
		}
		/*
		 * actually free the key (NULL destructor == unused)
		 */
		tsd_destructor[k] = NULL;
	}

	mutex_exit(&tsd_mutex);
}

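/*
 * Illustrative sketch (editor's addition, not part of the original code):
 * typical module use of the TSD interfaces in this section.  The key is
 * usually a module-global, and xx_destroy/xx_value are hypothetical.
 *
 *	static uint_t xx_key;
 *
 *	tsd_create(&xx_key, xx_destroy);	(once, e.g. at module load)
 *	(void) tsd_set(xx_key, xx_value);	(in each interested thread)
 *	xx_value = tsd_get(xx_key);		(later, in the same thread)
 *	tsd_destroy(&xx_key);			(at module unload)
 */
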
/*
 * Quickly return the per thread value that was stored with the specified key
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 */
void *
tsd_get(uint_t key)
{
	return (tsd_agent_get(curthread, key));
}

/*
 * Set a per thread value indexed with the specified key
 */
int
tsd_set(uint_t key, void *value)
{
	return (tsd_agent_set(curthread, key, value));
}

/*
 * Like tsd_get(), except that the agent lwp can get the tsd of
 * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or syslwp is creating a new lwp.
 */
void *
tsd_agent_get(kthread_t *t, uint_t key)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key && tsd != NULL && key <= tsd->ts_nkeys)
		return (tsd->ts_value[key - 1]);
	return (NULL);
}

/*
 * Like tsd_set(), except that the agent lwp can set the tsd of
 * another thread in the same process, or syslwp can set the tsd
 * of a thread it's in the middle of creating.
 *
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 * May lock out tsd_destroy (and tsd_create), may allocate memory with
 * lock held
 */
int
tsd_agent_set(kthread_t *t, uint_t key, void *value)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key == 0)
		return (EINVAL);
	if (tsd == NULL)
		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key <= tsd->ts_nkeys) {
		tsd->ts_value[key - 1] = value;
		return (0);
	}

	ASSERT(key <= tsd_nkeys);

	/*
	 * lock out tsd_destroy()
	 */
	mutex_enter(&tsd_mutex);
	if (tsd->ts_nkeys == 0) {
		/*
		 * Link onto list of threads with TSD
		 */
		if ((tsd->ts_next = tsd_list) != NULL)
			tsd_list->ts_prev = tsd;
		tsd_list = tsd;
	}

	/*
	 * Allocate thread local storage and set the value for key
	 */
	tsd->ts_value = tsd_realloc(tsd->ts_value,
	    tsd->ts_nkeys * sizeof (void *),
	    key * sizeof (void *));
	tsd->ts_nkeys = key;
	tsd->ts_value[key - 1] = value;
	mutex_exit(&tsd_mutex);

	return (0);
}


/*
 * Return the per thread value that was stored with the specified key
 *	If necessary, create the key and the value
 *	Assumes the caller is protecting *keyp from tsd_destroy
 */
void *
tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
{
	void *value;
	uint_t key = *keyp;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
		return (value);
	if (key == 0)
		tsd_create(keyp, destroy);
	(void) tsd_set(*keyp, value = (*allocate)());

	return (value);
}

/*
 * Called from thread_exit() to run the destructor function for each tsd
 *	Locks out tsd_create and tsd_destroy
 *	Assumes that the destructor *DOES NOT* use tsd
 */
void
tsd_exit(void)
{
	int i;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		return;

	if (tsd->ts_nkeys == 0) {
		kmem_free(tsd, sizeof (*tsd));
		curthread->t_tsd = NULL;
		return;
	}

	/*
	 * lock out tsd_create and tsd_destroy, call
	 * the destructor, and mark the value as destroyed.
	 */
	mutex_enter(&tsd_mutex);

	for (i = 0; i < tsd->ts_nkeys; i++) {
		if (tsd->ts_value[i] && tsd_destructor[i])
			(*tsd_destructor[i])(tsd->ts_value[i]);
		tsd->ts_value[i] = NULL;
	}

	/*
	 * remove from linked list of threads with TSD
	 */
	if (tsd->ts_next)
		tsd->ts_next->ts_prev = tsd->ts_prev;
	if (tsd->ts_prev)
		tsd->ts_prev->ts_next = tsd->ts_next;
	if (tsd_list == tsd)
		tsd_list = tsd->ts_next;

	mutex_exit(&tsd_mutex);

	/*
	 * free up the TSD
	 */
	kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
	kmem_free(tsd, sizeof (struct tsd_thread));
	curthread->t_tsd = NULL;
}

/*
 * realloc
 */
static void *
tsd_realloc(void *old, size_t osize, size_t nsize)
{
	void *new;

	new = kmem_zalloc(nsize, KM_SLEEP);
	if (old) {
		bcopy(old, new, osize);
		kmem_free(old, osize);
	}
	return (new);
}

/*
 * Return non-zero if an interrupt is being serviced.
 */
int
servicing_interrupt()
{
	int onintr = 0;

	/* Are we an interrupt thread */
	if (curthread->t_flag & T_INTR_THREAD)
		return (1);
	/* Are we servicing a high level interrupt? */
	if (CPU_ON_INTR(CPU)) {
		kpreempt_disable();
		onintr = CPU_ON_INTR(CPU);
		kpreempt_enable();
	}
	return (onintr);
}


/*
 * Change the dispatch priority of a thread in the system.
 * Used when raising or lowering a thread's priority.
 * (E.g., priority inheritance)
 *
 * Since threads are queued according to their priority, we
 * must check the thread's state to determine whether it
 * is on a queue somewhere.  If it is, we've got to:
 *
 *	o Dequeue the thread.
 *	o Change its effective priority.
 *	o Enqueue the thread.
 *
 * Assumptions: The thread whose priority we wish to change
 * must be locked before we call thread_change_(e)pri().
 * The thread_change(e)pri() function doesn't drop the thread
 * lock--that must be done by its caller.
 */
void
thread_change_epri(kthread_t *t, pri_t disp_pri)
{
	uint_t	state;

	ASSERT(THREAD_LOCK_HELD(t));

	/*
	 * If the inherited priority hasn't actually changed,
	 * just return.
	 */
	if (t->t_epri == disp_pri)
		return;

	state = t->t_state;

	/*
	 * If it's not on a queue, change the priority with impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_epri = disp_pri;
		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
	} else if (state == TS_SLEEP) {
		/*
		 * Take the thread out of its sleep queue.
		 * Change the inherited priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * effective priority needs to change.
		 */
		if (disp_pri != t->t_epri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 */
		(void) dispdeq(t);
		t->t_epri = disp_pri;
		setbackdq(t);
	}
	schedctl_set_cidpri(t);
}

/*
 * Function: Change the t_pri field of a thread.
 * Side Effects: Adjust the thread ordering on a run queue
 *		 or sleep queue, if necessary.
 * Returns: 1 if the thread was on a run queue, else 0.
 */
int
thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
{
	uint_t	state;
	int	on_rq = 0;

	ASSERT(THREAD_LOCK_HELD(t));

	state = t->t_state;
	THREAD_WILLCHANGE_PRI(t, disp_pri);

	/*
	 * If it's not on a queue, change the priority with impunity.
	 */
	if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
		t->t_pri = disp_pri;

		if (state == TS_ONPROC) {
			cpu_t *cp = t->t_disp_queue->disp_cpu;

			if (t == cp->cpu_dispthread)
				cp->cpu_dispatch_pri = DISP_PRIO(t);
		}
	} else if (state == TS_SLEEP) {
		/*
		 * If the priority has changed, take the thread out of
		 * its sleep queue and change the priority.
		 * Re-enqueue the thread.
		 * Each synchronization object exports a function
		 * to do this in an appropriate manner.
		 */
		if (disp_pri != t->t_pri)
			SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
	} else if (state == TS_WAIT) {
		/*
		 * Re-enqueue a thread on the wait queue if its
		 * priority needs to change.
		 */
		if (disp_pri != t->t_pri)
			waitq_change_pri(t, disp_pri);
	} else {
		/*
		 * The thread is on a run queue.
		 * Note: setbackdq() may not put the thread
		 * back on the same run queue where it originally
		 * resided.
		 *
		 * We still requeue the thread even if the priority
		 * is unchanged to preserve round-robin (and other)
		 * effects between threads of the same priority.
		 */
		on_rq = dispdeq(t);
		ASSERT(on_rq);
		t->t_pri = disp_pri;
		if (front) {
			setfrontdq(t);
		} else {
			setbackdq(t);
		}
	}
	schedctl_set_cidpri(t);
	return (on_rq);
}

/*
 * Tunable kmem_stackinfo is set, fill the kernel thread stack with a
 * specific pattern.
 */
static void
stkinfo_begin(kthread_t *t)
{
	caddr_t	start;	/* stack start */
	caddr_t	end;	/* stack end  */
	uint64_t *ptr;	/* pattern pointer */

	/*
	 * Stack grows up or down, see thread_create(),
	 * compute stack memory area start and end (start < end).
	 */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
		start = t->t_stkbase;
		end = t->t_stk;
	} else {
		/* stack grows up */
		start = t->t_stk;
		end = t->t_stkbase;
	}

	/*
	 * Stackinfo pattern size is 8 bytes.  Ensure proper 8-byte
	 * alignment for start and end in stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
	 */
	if ((((uintptr_t)start) & 0x7) != 0) {
		start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
	}
	end = (caddr_t)(((uintptr_t)end) & (~0x7));

	if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative or stack size > 1 meg, assume bogus */
		return;
	}

	/* fill stack area with a pattern (instead of zeros) */
	ptr = (uint64_t *)((void *)start);
	while (ptr < (uint64_t *)((void *)end)) {
		*ptr++ = KMEM_STKINFO_PATTERN;
	}
}


/*
 * Tunable kmem_stackinfo is set, create the stackinfo log if it doesn't
 * already exist, compute the percentage of kernel stack really used, and
 * record it in the log if it ranks among the highest usages seen so far.
 */
static void
stkinfo_end(kthread_t *t)
{
	caddr_t	start;	/* stack start */
	caddr_t	end;	/* stack end  */
	uint64_t *ptr;	/* pattern pointer */
	size_t stksz;	/* stack size */
	size_t smallest = 0;
	size_t percent = 0;
	uint_t index = 0;
	uint_t i;
	static size_t smallest_percent = (size_t)-1;
	static uint_t full = 0;

	/* create the stackinfo log, if doesn't already exist */
	mutex_enter(&kmem_stkinfo_lock);
	if (kmem_stkinfo_log == NULL) {
		kmem_stkinfo_log = (kmem_stkinfo_t *)
		    kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
		    (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
		if (kmem_stkinfo_log == NULL) {
			mutex_exit(&kmem_stkinfo_lock);
			return;
		}
	}
	mutex_exit(&kmem_stkinfo_lock);

	/*
	 * Stack grows up or down, see thread_create(),
	 * compute stack memory area start and end (start < end).
	 */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
		start = t->t_stkbase;
		end = t->t_stk;
	} else {
		/* stack grows up */
		start = t->t_stk;
		end = t->t_stkbase;
	}

	/* stack size as found in kthread_t */
	stksz = end - start;

	/*
	 * Stackinfo pattern size is 8 bytes.  Ensure proper 8-byte
	 * alignment for start and end in stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
	 */
	if ((((uintptr_t)start) & 0x7) != 0) {
		start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
	}
	end = (caddr_t)(((uintptr_t)end) & (~0x7));

	if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative or stack size > 1 meg, assume bogus */
		return;
	}

	/* search until no pattern in the stack */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
#if defined(__i386) || defined(__amd64)
		/*
		 * 6 longs are pushed on stack, see thread_load().  Skip
		 * them, so if kthread has never run, percent is zero.
		 * 8-byte alignment is preserved for a 32-bit kernel,
		 * 6 x 4 = 24, 24 is a multiple of 8.
		 *
		 */
		end -= (6 * sizeof (long));
#endif
		ptr = (uint64_t *)((void *)start);
		while (ptr < (uint64_t *)((void *)end)) {
			if (*ptr != KMEM_STKINFO_PATTERN) {
				percent = stkinfo_percent(end,
				    start, (caddr_t)ptr);
				break;
			}
			ptr++;
		}
	} else {
		/* stack grows up */
		ptr = (uint64_t *)((void *)end);
		ptr--;
		while (ptr >= (uint64_t *)((void *)start)) {
			if (*ptr != KMEM_STKINFO_PATTERN) {
				percent = stkinfo_percent(start,
				    end, (caddr_t)ptr);
				break;
			}
			ptr--;
		}
	}

	DTRACE_PROBE3(stack__usage, kthread_t *, t,
	    size_t, stksz, size_t, percent);

	if (percent == 0) {
		return;
	}

	mutex_enter(&kmem_stkinfo_lock);
	if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) {
		/*
		 * The log is full and already contains the highest values
		 */
		mutex_exit(&kmem_stkinfo_lock);
		return;
	}

	/* keep a log of the highest used stack */
	for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) {
		if (kmem_stkinfo_log[i].percent == 0) {
			index = i;
			full++;
			break;
		}
		if (smallest == 0) {
			smallest = kmem_stkinfo_log[i].percent;
			index = i;
			continue;
		}
		if (kmem_stkinfo_log[i].percent < smallest) {
			smallest = kmem_stkinfo_log[i].percent;
			index = i;
		}
	}

	if (percent >= kmem_stkinfo_log[index].percent) {
		kmem_stkinfo_log[index].kthread = (caddr_t)t;
		kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
		kmem_stkinfo_log[index].start = start;
		kmem_stkinfo_log[index].stksz = stksz;
		kmem_stkinfo_log[index].percent = percent;
		kmem_stkinfo_log[index].t_tid = t->t_tid;
		kmem_stkinfo_log[index].cmd[0] = '\0';
		if (t->t_tid != 0) {
			stksz = strlen((t->t_procp)->p_user.u_comm);
			if (stksz >= KMEM_STKINFO_STR_SIZE) {
				stksz = KMEM_STKINFO_STR_SIZE - 1;
				kmem_stkinfo_log[index].cmd[stksz] = '\0';
			} else {
				stksz += 1;
			}
			(void) memcpy(kmem_stkinfo_log[index].cmd,
			    (t->t_procp)->p_user.u_comm, stksz);
		}
		if (percent < smallest_percent) {
			smallest_percent = percent;
		}
	}
	mutex_exit(&kmem_stkinfo_lock);
}

/*
 * Tunable kmem_stackinfo is set, compute stack utilization percentage.
 */
static size_t
stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp)
{
	size_t percent;
	size_t s;

	if (t_stk > t_stkbase) {
		/* stack grows down */
		if (sp > t_stk) {
			return (0);
		}
		if (sp < t_stkbase) {
			return (100);
		}
		percent = t_stk - sp + 1;
		s = t_stk - t_stkbase + 1;
	} else {
		/* stack grows up */
		if (sp < t_stk) {
			return (0);
		}
		if (sp > t_stkbase) {
			return (100);
		}
		percent = sp - t_stk + 1;
		s = t_stkbase - t_stk + 1;
	}
	percent = ((100 * percent) / s) + 1;
	if (percent > 100) {
		percent = 100;
	}
	return (percent);
}