/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2021 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/pcb.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/resource.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/kmem.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/poll.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <c2/audit.h>
#include <sys/tnf.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/brand.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/schedctl.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <sys/kiconv.h>
#include <sys/ctype.h>
#include <sys/smt.h>

struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
kthread_t *allthreads = &t0;	/* circular list of all threads */

static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t	*thread_deathrow;	/* circular list of reapable threads */
kthread_t	*lwp_deathrow;		/* circular list of reapable threads */
kmutex_t	reaplock;		/* protects lwp and thread deathrows */
int	thread_reapcnt = 0;		/* number of threads on deathrow */
int	lwp_reapcnt = 0;		/* number of lwps on deathrow */
int	reaplimit = 16;			/* delay reaping until reaplimit */

thread_free_lock_t	*thread_free_lock;
					/* protects tick thread from reaper */

extern int nthread;

/*
 * System Scheduling classes.
 */
id_t	syscid;				/* system scheduling class ID */
id_t	sysdccid = CLASS_UNUSED;	/* reset when SDC loads */

void	*segkp_thread;			/* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;
static kt_did_t next_t_id = 1;

/* Default mode for thread binding to CPUs and processor sets */
int default_binding_mode = TB_ALLHARD;

/*
 * Min/Max stack sizes for stack size parameters
 */
#define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
#define	MIN_STKSIZE	DEFAULTSTKSZ

/*
 * default_stksize overrides lwp_default_stksize if it is set.
 */
int	default_stksize;
int	lwp_default_stksize;

static zone_key_t zone_thread_key;

unsigned int kmem_stackinfo;		/* stackinfo feature on-off */
kmem_stkinfo_t *kmem_stkinfo_log;	/* stackinfo circular log */
static kmutex_t kmem_stkinfo_lock;	/* protects kmem_stkinfo_log */

/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

void thread_reaper(void);

/* forward declarations for stackinfo feature */
static void stkinfo_begin(kthread_t *);
static void stkinfo_end(kthread_t *);
static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);

/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (turnstile_t));
	return (0);
}

/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
	turnstile_t *ts = buf;

	ASSERT(ts->ts_free == NULL);
	ASSERT(ts->ts_waiters == 0);
	ASSERT(ts->ts_inheritor == NULL);
	ASSERT(ts->ts_sleepq[0].sq_first == NULL);
	ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}

void
thread_init(void)
{
	kthread_t *tp;
	extern char sys_name[];
	extern void idle();
	struct cpu *cpu = CPU;
	int i;
	kmutex_t *lp;

	mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
	thread_free_lock =
	    kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
	for (i = 0; i < THREAD_FREE_NUM; i++) {
		lp = &thread_free_lock[i].tf_lock;
		mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
	}

#if defined(__x86)
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * "struct _klwp" includes a "struct pcb", which includes a
	 * "struct fpu", which needs to be 64-byte aligned on amd64
	 * (and even on i386) for xsave/xrstor.
	 */
	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    64, NULL, NULL, NULL, NULL, NULL, 0);
#else
	/*
	 * Allocate thread structures from static_arena.  This prevents
	 * issues where a thread tries to relocate its own thread
	 * structure and touches it after the mapping has been suspended.
	 */
	thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
	    PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

	lwp_stk_cache_init();

	lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

	turnstile_cache = kmem_cache_create("turnstile_cache",
	    sizeof (turnstile_t), 0,
	    turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);

	label_init();
	cred_init();

	/*
	 * Initialize various resource management facilities.
221 */ 222 rctl_init(); 223 cpucaps_init(); 224 /* 225 * Zone_init() should be called before project_init() so that project ID 226 * for the first project is initialized correctly. 227 */ 228 zone_init(); 229 project_init(); 230 brand_init(); 231 kiconv_init(); 232 task_init(); 233 tcache_init(); 234 pool_init(); 235 236 curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP); 237 238 /* 239 * Originally, we had two parameters to set default stack 240 * size: one for lwp's (lwp_default_stksize), and one for 241 * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz). 242 * Now we have a third parameter that overrides both if it is 243 * set to a legal stack size, called default_stksize. 244 */ 245 246 if (default_stksize == 0) { 247 default_stksize = DEFAULTSTKSZ; 248 } else if (default_stksize % PAGESIZE != 0 || 249 default_stksize > MAX_STKSIZE || 250 default_stksize < MIN_STKSIZE) { 251 cmn_err(CE_WARN, "Illegal stack size. Using %d", 252 (int)DEFAULTSTKSZ); 253 default_stksize = DEFAULTSTKSZ; 254 } else { 255 lwp_default_stksize = default_stksize; 256 } 257 258 if (lwp_default_stksize == 0) { 259 lwp_default_stksize = default_stksize; 260 } else if (lwp_default_stksize % PAGESIZE != 0 || 261 lwp_default_stksize > MAX_STKSIZE || 262 lwp_default_stksize < MIN_STKSIZE) { 263 cmn_err(CE_WARN, "Illegal stack size. Using %d", 264 default_stksize); 265 lwp_default_stksize = default_stksize; 266 } 267 268 segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz, 269 lwp_default_stksize, 270 (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED)); 271 272 segkp_thread = segkp_cache_init(segkp, t_cache_sz, 273 default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON); 274 275 (void) getcid(sys_name, &syscid); 276 curthread->t_cid = syscid; /* current thread is t0 */ 277 278 /* 279 * Set up the first CPU's idle thread. 280 * It runs whenever the CPU has nothing worthwhile to do. 281 */ 282 tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1); 283 cpu->cpu_idle_thread = tp; 284 tp->t_preempt = 1; 285 tp->t_disp_queue = cpu->cpu_disp; 286 ASSERT(tp->t_disp_queue != NULL); 287 tp->t_bound_cpu = cpu; 288 tp->t_affinitycnt = 1; 289 290 /* 291 * Registering a thread in the callback table is usually 292 * done in the initialization code of the thread. In this 293 * case, we do it right after thread creation to avoid 294 * blocking idle thread while registering itself. It also 295 * avoids the possibility of reregistration in case a CPU 296 * restarts its idle thread. 297 */ 298 CALLB_CPR_INIT_SAFE(tp, "idle"); 299 300 /* 301 * Create the thread_reaper daemon. From this point on, exited 302 * threads will get reaped. 303 */ 304 (void) thread_create(NULL, 0, (void (*)())thread_reaper, 305 NULL, 0, &p0, TS_RUN, minclsyspri); 306 307 /* 308 * Finish initializing the kernel memory allocator now that 309 * thread_create() is available. 310 */ 311 kmem_thread_init(); 312 313 if (boothowto & RB_DEBUG) 314 kdi_dvec_thravail(); 315 } 316 317 /* 318 * Create a thread. 319 * 320 * thread_create() blocks for memory if necessary. It never fails. 321 * 322 * If stk is NULL, the thread is created at the base of the stack 323 * and cannot be swapped. 324 */ 325 kthread_t * 326 thread_create( 327 caddr_t stk, 328 size_t stksize, 329 void (*proc)(), 330 void *arg, 331 size_t len, 332 proc_t *pp, 333 int state, 334 pri_t pri) 335 { 336 kthread_t *t; 337 extern struct classfuncs sys_classfuncs; 338 turnstile_t *ts; 339 340 /* 341 * Every thread keeps a turnstile around in case it needs to block. 
342 * The only reason the turnstile is not simply part of the thread 343 * structure is that we may have to break the association whenever 344 * more than one thread blocks on a given synchronization object. 345 * From a memory-management standpoint, turnstiles are like the 346 * "attached mblks" that hang off dblks in the streams allocator. 347 */ 348 ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP); 349 350 if (stk == NULL) { 351 /* 352 * alloc both thread and stack in segkp chunk 353 */ 354 355 if (stksize < default_stksize) 356 stksize = default_stksize; 357 358 if (stksize == default_stksize) { 359 stk = (caddr_t)segkp_cache_get(segkp_thread); 360 } else { 361 stksize = roundup(stksize, PAGESIZE); 362 stk = (caddr_t)segkp_get(segkp, stksize, 363 (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED)); 364 } 365 366 ASSERT(stk != NULL); 367 368 /* 369 * The machine-dependent mutex code may require that 370 * thread pointers (since they may be used for mutex owner 371 * fields) have certain alignment requirements. 372 * PTR24_ALIGN is the size of the alignment quanta. 373 * XXX - assumes stack grows toward low addresses. 374 */ 375 if (stksize <= sizeof (kthread_t) + PTR24_ALIGN) 376 cmn_err(CE_PANIC, "thread_create: proposed stack size" 377 " too small to hold thread."); 378 #ifdef STACK_GROWTH_DOWN 379 stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1); 380 stksize &= -PTR24_ALIGN; /* make thread aligned */ 381 t = (kthread_t *)(stk + stksize); 382 bzero(t, sizeof (kthread_t)); 383 if (audit_active) 384 audit_thread_create(t); 385 t->t_stk = stk + stksize; 386 t->t_stkbase = stk; 387 #else /* stack grows to larger addresses */ 388 stksize -= SA(sizeof (kthread_t)); 389 t = (kthread_t *)(stk); 390 bzero(t, sizeof (kthread_t)); 391 t->t_stk = stk + sizeof (kthread_t); 392 t->t_stkbase = stk + stksize + sizeof (kthread_t); 393 #endif /* STACK_GROWTH_DOWN */ 394 t->t_flag |= T_TALLOCSTK; 395 t->t_swap = stk; 396 } else { 397 t = kmem_cache_alloc(thread_cache, KM_SLEEP); 398 bzero(t, sizeof (kthread_t)); 399 ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0); 400 if (audit_active) 401 audit_thread_create(t); 402 /* 403 * Initialize t_stk to the kernel stack pointer to use 404 * upon entry to the kernel 405 */ 406 #ifdef STACK_GROWTH_DOWN 407 t->t_stk = stk + stksize; 408 t->t_stkbase = stk; 409 #else 410 t->t_stk = stk; /* 3b2-like */ 411 t->t_stkbase = stk + stksize; 412 #endif /* STACK_GROWTH_DOWN */ 413 } 414 415 if (kmem_stackinfo != 0) { 416 stkinfo_begin(t); 417 } 418 419 t->t_ts = ts; 420 421 /* 422 * p_cred could be NULL if it thread_create is called before cred_init 423 * is called in main. 
424 */ 425 mutex_enter(&pp->p_crlock); 426 if (pp->p_cred) 427 crhold(t->t_cred = pp->p_cred); 428 mutex_exit(&pp->p_crlock); 429 t->t_start = gethrestime_sec(); 430 t->t_startpc = proc; 431 t->t_procp = pp; 432 t->t_clfuncs = &sys_classfuncs.thread; 433 t->t_cid = syscid; 434 t->t_pri = pri; 435 t->t_stime = ddi_get_lbolt(); 436 t->t_schedflag = TS_LOAD | TS_DONT_SWAP; 437 t->t_bind_cpu = PBIND_NONE; 438 t->t_bindflag = (uchar_t)default_binding_mode; 439 t->t_bind_pset = PS_NONE; 440 t->t_plockp = &pp->p_lock; 441 t->t_copyops = NULL; 442 t->t_taskq = NULL; 443 t->t_anttime = 0; 444 t->t_hatdepth = 0; 445 446 t->t_dtrace_vtime = 1; /* assure vtimestamp is always non-zero */ 447 448 CPU_STATS_ADDQ(CPU, sys, nthreads, 1); 449 #ifndef NPROBE 450 /* Kernel probe */ 451 tnf_thread_create(t); 452 #endif /* NPROBE */ 453 LOCK_INIT_CLEAR(&t->t_lock); 454 455 /* 456 * Callers who give us a NULL proc must do their own 457 * stack initialization. e.g. lwp_create() 458 */ 459 if (proc != NULL) { 460 t->t_stk = thread_stk_init(t->t_stk); 461 thread_load(t, proc, arg, len); 462 } 463 464 /* 465 * Put a hold on project0. If this thread is actually in a 466 * different project, then t_proj will be changed later in 467 * lwp_create(). All kernel-only threads must be in project 0. 468 */ 469 t->t_proj = project_hold(proj0p); 470 471 lgrp_affinity_init(&t->t_lgrp_affinity); 472 473 mutex_enter(&pidlock); 474 nthread++; 475 t->t_did = next_t_id++; 476 t->t_prev = curthread->t_prev; 477 t->t_next = curthread; 478 479 /* 480 * Add the thread to the list of all threads, and initialize 481 * its t_cpu pointer. We need to block preemption since 482 * cpu_offline walks the thread list looking for threads 483 * with t_cpu pointing to the CPU being offlined. We want 484 * to make sure that the list is consistent and that if t_cpu 485 * is set, the thread is on the list. 486 */ 487 kpreempt_disable(); 488 curthread->t_prev->t_next = t; 489 curthread->t_prev = t; 490 491 /* 492 * We'll always create in the default partition since that's where 493 * kernel threads go (we'll change this later if needed, in 494 * lwp_create()). 495 */ 496 t->t_cpupart = &cp_default; 497 498 /* 499 * For now, affiliate this thread with the root lgroup. 500 * Since the kernel does not (presently) allocate its memory 501 * in a locality aware fashion, the root is an appropriate home. 502 * If this thread is later associated with an lwp, it will have 503 * its lgroup re-assigned at that time. 504 */ 505 lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1); 506 507 /* 508 * If the current CPU is in the default cpupart, use it. Otherwise, 509 * pick one that is; before entering the dispatcher code, we'll 510 * make sure to keep the invariant that ->t_cpu is set. (In fact, we 511 * rely on this, in ht_should_run(), in the call tree of 512 * disp_lowpri_cpu().) 513 */ 514 if (CPU->cpu_part == &cp_default) { 515 t->t_cpu = CPU; 516 } else { 517 t->t_cpu = cp_default.cp_cpulist; 518 t->t_cpu = disp_lowpri_cpu(t->t_cpu, t, t->t_pri); 519 } 520 521 t->t_disp_queue = t->t_cpu->cpu_disp; 522 kpreempt_enable(); 523 524 /* 525 * Initialize thread state and the dispatcher lock pointer. 526 * Need to hold onto pidlock to block allthreads walkers until 527 * the state is set. 
528 */ 529 switch (state) { 530 case TS_RUN: 531 curthread->t_oldspl = splhigh(); /* get dispatcher spl */ 532 THREAD_SET_STATE(t, TS_STOPPED, &transition_lock); 533 CL_SETRUN(t); 534 thread_unlock(t); 535 break; 536 537 case TS_ONPROC: 538 THREAD_ONPROC(t, t->t_cpu); 539 break; 540 541 case TS_FREE: 542 /* 543 * Free state will be used for intr threads. 544 * The interrupt routine must set the thread dispatcher 545 * lock pointer (t_lockp) if starting on a CPU 546 * other than the current one. 547 */ 548 THREAD_FREEINTR(t, CPU); 549 break; 550 551 case TS_STOPPED: 552 THREAD_SET_STATE(t, TS_STOPPED, &stop_lock); 553 break; 554 555 default: /* TS_SLEEP, TS_ZOMB or TS_TRANS */ 556 cmn_err(CE_PANIC, "thread_create: invalid state %d", state); 557 } 558 mutex_exit(&pidlock); 559 return (t); 560 } 561 562 /* 563 * Move thread to project0 and take care of project reference counters. 564 */ 565 void 566 thread_rele(kthread_t *t) 567 { 568 kproject_t *kpj; 569 570 thread_lock(t); 571 572 ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0); 573 kpj = ttoproj(t); 574 t->t_proj = proj0p; 575 576 thread_unlock(t); 577 578 if (kpj != proj0p) { 579 project_rele(kpj); 580 (void) project_hold(proj0p); 581 } 582 } 583 584 void 585 thread_exit(void) 586 { 587 kthread_t *t = curthread; 588 589 if ((t->t_proc_flag & TP_ZTHREAD) != 0) 590 cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called"); 591 592 tsd_exit(); /* Clean up this thread's TSD */ 593 594 kcpc_passivate(); /* clean up performance counter state */ 595 596 /* 597 * No kernel thread should have called poll() without arranging 598 * calling pollcleanup() here. 599 */ 600 ASSERT(t->t_pollstate == NULL); 601 ASSERT(t->t_schedctl == NULL); 602 if (t->t_door) 603 door_slam(); /* in case thread did an upcall */ 604 605 #ifndef NPROBE 606 /* Kernel probe */ 607 if (t->t_tnf_tpdp) 608 tnf_thread_exit(); 609 #endif /* NPROBE */ 610 611 thread_rele(t); 612 t->t_preempt++; 613 614 /* 615 * remove thread from the all threads list so that 616 * death-row can use the same pointers. 617 */ 618 mutex_enter(&pidlock); 619 t->t_next->t_prev = t->t_prev; 620 t->t_prev->t_next = t->t_next; 621 ASSERT(allthreads != t); /* t0 never exits */ 622 cv_broadcast(&t->t_joincv); /* wake up anyone in thread_join */ 623 mutex_exit(&pidlock); 624 625 if (t->t_ctx != NULL) 626 exitctx(t); 627 if (t->t_procp->p_pctx != NULL) 628 exitpctx(t->t_procp); 629 630 if (kmem_stackinfo != 0) { 631 stkinfo_end(t); 632 } 633 634 t->t_state = TS_ZOMB; /* set zombie thread */ 635 636 swtch_from_zombie(); /* give up the CPU */ 637 /* NOTREACHED */ 638 } 639 640 /* 641 * Check to see if the specified thread is active (defined as being on 642 * the thread list). This is certainly a slow way to do this; if there's 643 * ever a reason to speed it up, we could maintain a hash table of active 644 * threads indexed by their t_did. 645 */ 646 static kthread_t * 647 did_to_thread(kt_did_t tid) 648 { 649 kthread_t *t; 650 651 ASSERT(MUTEX_HELD(&pidlock)); 652 for (t = curthread->t_next; t != curthread; t = t->t_next) { 653 if (t->t_did == tid) 654 break; 655 } 656 if (t->t_did == tid) 657 return (t); 658 else 659 return (NULL); 660 } 661 662 /* 663 * Wait for specified thread to exit. Returns immediately if the thread 664 * could not be found, meaning that it has either already exited or never 665 * existed. 
666 */ 667 void 668 thread_join(kt_did_t tid) 669 { 670 kthread_t *t; 671 672 ASSERT(tid != curthread->t_did); 673 ASSERT(tid != t0.t_did); 674 675 mutex_enter(&pidlock); 676 /* 677 * Make sure we check that the thread is on the thread list 678 * before blocking on it; otherwise we could end up blocking on 679 * a cv that's already been freed. In other words, don't cache 680 * the thread pointer across calls to cv_wait. 681 * 682 * The choice of loop invariant means that whenever a thread 683 * is taken off the allthreads list, a cv_broadcast must be 684 * performed on that thread's t_joincv to wake up any waiters. 685 * The broadcast doesn't have to happen right away, but it 686 * shouldn't be postponed indefinitely (e.g., by doing it in 687 * thread_free which may only be executed when the deathrow 688 * queue is processed. 689 */ 690 while (t = did_to_thread(tid)) 691 cv_wait(&t->t_joincv, &pidlock); 692 mutex_exit(&pidlock); 693 } 694 695 void 696 thread_free_prevent(kthread_t *t) 697 { 698 kmutex_t *lp; 699 700 lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock; 701 mutex_enter(lp); 702 } 703 704 void 705 thread_free_allow(kthread_t *t) 706 { 707 kmutex_t *lp; 708 709 lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock; 710 mutex_exit(lp); 711 } 712 713 static void 714 thread_free_barrier(kthread_t *t) 715 { 716 kmutex_t *lp; 717 718 lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock; 719 mutex_enter(lp); 720 mutex_exit(lp); 721 } 722 723 void 724 thread_free(kthread_t *t) 725 { 726 boolean_t allocstk = (t->t_flag & T_TALLOCSTK); 727 klwp_t *lwp = t->t_lwp; 728 caddr_t swap = t->t_swap; 729 730 ASSERT(t != &t0 && t->t_state == TS_FREE); 731 ASSERT(t->t_door == NULL); 732 ASSERT(t->t_schedctl == NULL); 733 ASSERT(t->t_pollstate == NULL); 734 735 t->t_pri = 0; 736 t->t_pc = 0; 737 t->t_sp = 0; 738 t->t_wchan0 = NULL; 739 t->t_wchan = NULL; 740 if (t->t_cred != NULL) { 741 crfree(t->t_cred); 742 t->t_cred = 0; 743 } 744 if (t->t_pdmsg) { 745 kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1); 746 t->t_pdmsg = NULL; 747 } 748 if (audit_active) 749 audit_thread_free(t); 750 #ifndef NPROBE 751 if (t->t_tnf_tpdp) 752 tnf_thread_free(t); 753 #endif /* NPROBE */ 754 if (t->t_cldata) { 755 CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata); 756 } 757 if (t->t_rprof != NULL) { 758 kmem_free(t->t_rprof, sizeof (*t->t_rprof)); 759 t->t_rprof = NULL; 760 } 761 t->t_lockp = NULL; /* nothing should try to lock this thread now */ 762 if (lwp) 763 lwp_freeregs(lwp, 0); 764 if (t->t_ctx) 765 freectx(t, 0); 766 t->t_stk = NULL; 767 if (lwp) 768 lwp_stk_fini(lwp); 769 lock_clear(&t->t_lock); 770 771 if (t->t_ts->ts_waiters > 0) 772 panic("thread_free: turnstile still active"); 773 774 kmem_cache_free(turnstile_cache, t->t_ts); 775 776 free_afd(&t->t_activefd); 777 778 /* 779 * Barrier for the tick accounting code. The tick accounting code 780 * holds this lock to keep the thread from going away while it's 781 * looking at it. 782 */ 783 thread_free_barrier(t); 784 785 ASSERT(ttoproj(t) == proj0p); 786 project_rele(ttoproj(t)); 787 788 lgrp_affinity_free(&t->t_lgrp_affinity); 789 790 mutex_enter(&pidlock); 791 nthread--; 792 mutex_exit(&pidlock); 793 794 if (t->t_name != NULL) { 795 kmem_free(t->t_name, THREAD_NAME_MAX); 796 t->t_name = NULL; 797 } 798 799 /* 800 * Free thread, lwp and stack. This needs to be done carefully, since 801 * if T_TALLOCSTK is set, the thread is part of the stack. 
802 */ 803 t->t_lwp = NULL; 804 t->t_swap = NULL; 805 806 if (swap) { 807 segkp_release(segkp, swap); 808 } 809 if (lwp) { 810 kmem_cache_free(lwp_cache, lwp); 811 } 812 if (!allocstk) { 813 kmem_cache_free(thread_cache, t); 814 } 815 } 816 817 /* 818 * Removes threads associated with the given zone from a deathrow queue. 819 * tp is a pointer to the head of the deathrow queue, and countp is a 820 * pointer to the current deathrow count. Returns a linked list of 821 * threads removed from the list. 822 */ 823 static kthread_t * 824 thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid) 825 { 826 kthread_t *tmp, *list = NULL; 827 cred_t *cr; 828 829 ASSERT(MUTEX_HELD(&reaplock)); 830 while (*tp != NULL) { 831 if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) { 832 tmp = *tp; 833 *tp = tmp->t_forw; 834 tmp->t_forw = list; 835 list = tmp; 836 (*countp)--; 837 } else { 838 tp = &(*tp)->t_forw; 839 } 840 } 841 return (list); 842 } 843 844 static void 845 thread_reap_list(kthread_t *t) 846 { 847 kthread_t *next; 848 849 while (t != NULL) { 850 next = t->t_forw; 851 thread_free(t); 852 t = next; 853 } 854 } 855 856 /* ARGSUSED */ 857 static void 858 thread_zone_destroy(zoneid_t zoneid, void *unused) 859 { 860 kthread_t *t, *l; 861 862 mutex_enter(&reaplock); 863 /* 864 * Pull threads and lwps associated with zone off deathrow lists. 865 */ 866 t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid); 867 l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid); 868 mutex_exit(&reaplock); 869 870 /* 871 * Guard against race condition in mutex_owner_running: 872 * thread=owner(mutex) 873 * <interrupt> 874 * thread exits mutex 875 * thread exits 876 * thread reaped 877 * thread struct freed 878 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE. 879 * A cross call to all cpus will cause the interrupt handler 880 * to reset the PC if it is in mutex_owner_running, refreshing 881 * stale thread pointers. 882 */ 883 mutex_sync(); /* sync with mutex code */ 884 885 /* 886 * Reap threads 887 */ 888 thread_reap_list(t); 889 890 /* 891 * Reap lwps 892 */ 893 thread_reap_list(l); 894 } 895 896 /* 897 * cleanup zombie threads that are on deathrow. 898 */ 899 void 900 thread_reaper() 901 { 902 kthread_t *t, *l; 903 callb_cpr_t cprinfo; 904 905 /* 906 * Register callback to clean up threads when zone is destroyed. 907 */ 908 zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy); 909 910 CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper"); 911 for (;;) { 912 mutex_enter(&reaplock); 913 while (thread_deathrow == NULL && lwp_deathrow == NULL) { 914 CALLB_CPR_SAFE_BEGIN(&cprinfo); 915 cv_wait(&reaper_cv, &reaplock); 916 CALLB_CPR_SAFE_END(&cprinfo, &reaplock); 917 } 918 /* 919 * mutex_sync() needs to be called when reaping, but 920 * not too often. We limit reaping rate to once 921 * per second. Reaplimit is max rate at which threads can 922 * be freed. Does not impact thread destruction/creation. 923 */ 924 t = thread_deathrow; 925 l = lwp_deathrow; 926 thread_deathrow = NULL; 927 lwp_deathrow = NULL; 928 thread_reapcnt = 0; 929 lwp_reapcnt = 0; 930 mutex_exit(&reaplock); 931 932 /* 933 * Guard against race condition in mutex_owner_running: 934 * thread=owner(mutex) 935 * <interrupt> 936 * thread exits mutex 937 * thread exits 938 * thread reaped 939 * thread struct freed 940 * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE. 
941 * A cross call to all cpus will cause the interrupt handler 942 * to reset the PC if it is in mutex_owner_running, refreshing 943 * stale thread pointers. 944 */ 945 mutex_sync(); /* sync with mutex code */ 946 /* 947 * Reap threads 948 */ 949 thread_reap_list(t); 950 951 /* 952 * Reap lwps 953 */ 954 thread_reap_list(l); 955 delay(hz); 956 } 957 } 958 959 /* 960 * This is called by lwpcreate, etc.() to put a lwp_deathrow thread onto 961 * thread_deathrow. The thread's state is changed already TS_FREE to indicate 962 * that is reapable. The thread already holds the reaplock, and was already 963 * freed. 964 */ 965 void 966 reapq_move_lq_to_tq(kthread_t *t) 967 { 968 ASSERT(t->t_state == TS_FREE); 969 ASSERT(MUTEX_HELD(&reaplock)); 970 t->t_forw = thread_deathrow; 971 thread_deathrow = t; 972 thread_reapcnt++; 973 if (lwp_reapcnt + thread_reapcnt > reaplimit) 974 cv_signal(&reaper_cv); /* wake the reaper */ 975 } 976 977 /* 978 * This is called by resume() to put a zombie thread onto deathrow. 979 * The thread's state is changed to TS_FREE to indicate that is reapable. 980 * This is called from the idle thread so it must not block - just spin. 981 */ 982 void 983 reapq_add(kthread_t *t) 984 { 985 mutex_enter(&reaplock); 986 987 /* 988 * lwp_deathrow contains threads with lwp linkage and 989 * swappable thread stacks which have the default stacksize. 990 * These threads' lwps and stacks may be reused by lwp_create(). 991 * 992 * Anything else goes on thread_deathrow(), where it will eventually 993 * be thread_free()d. 994 */ 995 if (t->t_flag & T_LWPREUSE) { 996 ASSERT(ttolwp(t) != NULL); 997 t->t_forw = lwp_deathrow; 998 lwp_deathrow = t; 999 lwp_reapcnt++; 1000 } else { 1001 t->t_forw = thread_deathrow; 1002 thread_deathrow = t; 1003 thread_reapcnt++; 1004 } 1005 if (lwp_reapcnt + thread_reapcnt > reaplimit) 1006 cv_signal(&reaper_cv); /* wake the reaper */ 1007 t->t_state = TS_FREE; 1008 lock_clear(&t->t_lock); 1009 1010 /* 1011 * Before we return, we need to grab and drop the thread lock for 1012 * the dead thread. At this point, the current thread is the idle 1013 * thread, and the dead thread's CPU lock points to the current 1014 * CPU -- and we must grab and drop the lock to synchronize with 1015 * a racing thread walking a blocking chain that the zombie thread 1016 * was recently in. By this point, that blocking chain is (by 1017 * definition) stale: the dead thread is not holding any locks, and 1018 * is therefore not in any blocking chains -- but if we do not regrab 1019 * our lock before freeing the dead thread's data structures, the 1020 * thread walking the (stale) blocking chain will die on memory 1021 * corruption when it attempts to drop the dead thread's lock. We 1022 * only need do this once because there is no way for the dead thread 1023 * to ever again be on a blocking chain: once we have grabbed and 1024 * dropped the thread lock, we are guaranteed that anyone that could 1025 * have seen this thread in a blocking chain can no longer see it. 
1026 */ 1027 thread_lock(t); 1028 thread_unlock(t); 1029 1030 mutex_exit(&reaplock); 1031 } 1032 1033 static struct ctxop * 1034 ctxop_find_by_tmpl(kthread_t *t, const struct ctxop_template *ct, void *arg) 1035 { 1036 struct ctxop *ctx, *head; 1037 1038 ASSERT(MUTEX_HELD(&t->t_ctx_lock)); 1039 ASSERT(curthread->t_preempt > 0); 1040 1041 if (t->t_ctx == NULL) { 1042 return (NULL); 1043 } 1044 1045 ctx = head = t->t_ctx; 1046 do { 1047 if (ctx->save_op == ct->ct_save && 1048 ctx->restore_op == ct->ct_restore && 1049 ctx->fork_op == ct->ct_fork && 1050 ctx->lwp_create_op == ct->ct_lwp_create && 1051 ctx->exit_op == ct->ct_exit && 1052 ctx->free_op == ct->ct_free && 1053 ctx->arg == arg) { 1054 return (ctx); 1055 } 1056 1057 ctx = ctx->next; 1058 } while (ctx != head); 1059 1060 return (NULL); 1061 } 1062 1063 static void 1064 ctxop_detach_chain(kthread_t *t, struct ctxop *ctx) 1065 { 1066 ASSERT(t != NULL); 1067 ASSERT(t->t_ctx != NULL); 1068 ASSERT(ctx != NULL); 1069 ASSERT(ctx->next != NULL && ctx->prev != NULL); 1070 1071 ctx->prev->next = ctx->next; 1072 ctx->next->prev = ctx->prev; 1073 if (ctx->next == ctx) { 1074 /* last remaining item */ 1075 t->t_ctx = NULL; 1076 } else if (ctx == t->t_ctx) { 1077 /* fix up head of list */ 1078 t->t_ctx = ctx->next; 1079 } 1080 ctx->next = ctx->prev = NULL; 1081 } 1082 1083 struct ctxop * 1084 ctxop_allocate(const struct ctxop_template *ct, void *arg) 1085 { 1086 struct ctxop *ctx; 1087 1088 /* 1089 * No changes have been made to the interface yet, so we expect all 1090 * callers to use the original revision. 1091 */ 1092 VERIFY3U(ct->ct_rev, ==, CTXOP_TPL_REV); 1093 1094 ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP); 1095 ctx->save_op = ct->ct_save; 1096 ctx->restore_op = ct->ct_restore; 1097 ctx->fork_op = ct->ct_fork; 1098 ctx->lwp_create_op = ct->ct_lwp_create; 1099 ctx->exit_op = ct->ct_exit; 1100 ctx->free_op = ct->ct_free; 1101 ctx->arg = arg; 1102 ctx->save_ts = 0; 1103 ctx->restore_ts = 0; 1104 ctx->next = ctx->prev = NULL; 1105 1106 return (ctx); 1107 } 1108 1109 void 1110 ctxop_free(struct ctxop *ctx) 1111 { 1112 if (ctx->free_op != NULL) 1113 (ctx->free_op)(ctx->arg, 0); 1114 1115 kmem_free(ctx, sizeof (struct ctxop)); 1116 } 1117 1118 void 1119 ctxop_attach(kthread_t *t, struct ctxop *ctx) 1120 { 1121 ASSERT(ctx->next == NULL && ctx->prev == NULL); 1122 1123 /* 1124 * Keep ctxops in a doubly-linked list to allow traversal in both 1125 * directions. Using only the newest-to-oldest ordering was adequate 1126 * previously, but reversing the order for restore_op actions is 1127 * necessary if later-added ctxops depends on earlier ones. 1128 * 1129 * One example of such a dependency: Hypervisor software handling the 1130 * guest FPU expects that it save FPU state prior to host FPU handling 1131 * and consequently handle the guest logic _after_ the host FPU has 1132 * been restored. 1133 * 1134 * The t_ctx member points to the most recently added ctxop or is NULL 1135 * if no ctxops are associated with the thread. The 'next' pointers 1136 * form a loop of the ctxops in newest-to-oldest order. The 'prev' 1137 * pointers form a loop in the reverse direction, where t_ctx->prev is 1138 * the oldest entry associated with the thread. 1139 * 1140 * The protection of kpreempt_disable is required to safely perform the 1141 * list insertion, since there are inconsistent states between some of 1142 * the pointer assignments. 
1143 */ 1144 kpreempt_disable(); 1145 if (t->t_ctx == NULL) { 1146 ctx->next = ctx; 1147 ctx->prev = ctx; 1148 } else { 1149 struct ctxop *head = t->t_ctx, *tail = t->t_ctx->prev; 1150 1151 ctx->next = head; 1152 ctx->prev = tail; 1153 head->prev = ctx; 1154 tail->next = ctx; 1155 } 1156 t->t_ctx = ctx; 1157 kpreempt_enable(); 1158 } 1159 1160 void 1161 ctxop_detach(kthread_t *t, struct ctxop *ctx) 1162 { 1163 /* 1164 * The incoming kthread_t (which is the thread for which the 1165 * context ops will be detached) should be one of the following: 1166 * 1167 * a) the current thread, 1168 * 1169 * b) a thread of a process that's being forked (SIDL), 1170 * 1171 * c) a thread that belongs to the same process as the current 1172 * thread and for which the current thread is the agent thread, 1173 * 1174 * d) a thread that is TS_STOPPED which is indicative of it 1175 * being (if curthread is not an agent) a thread being created 1176 * as part of an lwp creation. 1177 */ 1178 ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL || 1179 ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED); 1180 1181 /* 1182 * Serialize modifications to t->t_ctx to prevent the agent thread 1183 * and the target thread from racing with each other during lwp exit. 1184 */ 1185 mutex_enter(&t->t_ctx_lock); 1186 kpreempt_disable(); 1187 1188 VERIFY(t->t_ctx != NULL); 1189 1190 #ifdef DEBUG 1191 /* Check that provided `ctx` is actually present in the t_ctx chain */ 1192 struct ctxop *head, *cur; 1193 head = cur = t->t_ctx; 1194 for (;;) { 1195 if (cur == ctx) { 1196 break; 1197 } 1198 cur = cur->next; 1199 /* If we wrap, having not found `ctx`, this assert will fail */ 1200 ASSERT3P(cur, !=, head); 1201 } 1202 #endif /* DEBUG */ 1203 1204 ctxop_detach_chain(t, ctx); 1205 1206 mutex_exit(&t->t_ctx_lock); 1207 kpreempt_enable(); 1208 } 1209 1210 void 1211 ctxop_install(kthread_t *t, const struct ctxop_template *ct, void *arg) 1212 { 1213 ctxop_attach(t, ctxop_allocate(ct, arg)); 1214 } 1215 1216 int 1217 ctxop_remove(kthread_t *t, const struct ctxop_template *ct, void *arg) 1218 { 1219 struct ctxop *ctx; 1220 1221 /* 1222 * ctxop_remove() shares the same requirements for the acted-upon thread 1223 * as ctxop_detach() 1224 */ 1225 ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL || 1226 ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED); 1227 1228 /* 1229 * Serialize modifications to t->t_ctx to prevent the agent thread 1230 * and the target thread from racing with each other during lwp exit. 
1231 */ 1232 mutex_enter(&t->t_ctx_lock); 1233 kpreempt_disable(); 1234 1235 ctx = ctxop_find_by_tmpl(t, ct, arg); 1236 if (ctx != NULL) { 1237 ctxop_detach_chain(t, ctx); 1238 ctxop_free(ctx); 1239 } 1240 1241 mutex_exit(&t->t_ctx_lock); 1242 kpreempt_enable(); 1243 1244 if (ctx != NULL) { 1245 return (1); 1246 } 1247 return (0); 1248 } 1249 1250 void 1251 savectx(kthread_t *t) 1252 { 1253 ASSERT(t == curthread); 1254 1255 if (t->t_ctx != NULL) { 1256 struct ctxop *ctx, *head; 1257 1258 /* Forward traversal */ 1259 ctx = head = t->t_ctx; 1260 do { 1261 if (ctx->save_op != NULL) { 1262 ctx->save_ts = gethrtime_unscaled(); 1263 (ctx->save_op)(ctx->arg); 1264 } 1265 ctx = ctx->next; 1266 } while (ctx != head); 1267 } 1268 } 1269 1270 void 1271 restorectx(kthread_t *t) 1272 { 1273 ASSERT(t == curthread); 1274 1275 if (t->t_ctx != NULL) { 1276 struct ctxop *ctx, *tail; 1277 1278 /* Backward traversal (starting at the tail) */ 1279 ctx = tail = t->t_ctx->prev; 1280 do { 1281 if (ctx->restore_op != NULL) { 1282 ctx->restore_ts = gethrtime_unscaled(); 1283 (ctx->restore_op)(ctx->arg); 1284 } 1285 ctx = ctx->prev; 1286 } while (ctx != tail); 1287 } 1288 } 1289 1290 void 1291 forkctx(kthread_t *t, kthread_t *ct) 1292 { 1293 if (t->t_ctx != NULL) { 1294 struct ctxop *ctx, *head; 1295 1296 /* Forward traversal */ 1297 ctx = head = t->t_ctx; 1298 do { 1299 if (ctx->fork_op != NULL) { 1300 (ctx->fork_op)(t, ct); 1301 } 1302 ctx = ctx->next; 1303 } while (ctx != head); 1304 } 1305 } 1306 1307 /* 1308 * Note that this operator is only invoked via the _lwp_create 1309 * system call. The system may have other reasons to create lwps 1310 * e.g. the agent lwp or the doors unreferenced lwp. 1311 */ 1312 void 1313 lwp_createctx(kthread_t *t, kthread_t *ct) 1314 { 1315 if (t->t_ctx != NULL) { 1316 struct ctxop *ctx, *head; 1317 1318 /* Forward traversal */ 1319 ctx = head = t->t_ctx; 1320 do { 1321 if (ctx->lwp_create_op != NULL) { 1322 (ctx->lwp_create_op)(t, ct); 1323 } 1324 ctx = ctx->next; 1325 } while (ctx != head); 1326 } 1327 } 1328 1329 /* 1330 * exitctx is called from thread_exit() and lwp_exit() to perform any actions 1331 * needed when the thread/LWP leaves the processor for the last time. This 1332 * routine is not intended to deal with freeing memory; freectx() is used for 1333 * that purpose during thread_free(). This routine is provided to allow for 1334 * clean-up that can't wait until thread_free(). 1335 */ 1336 void 1337 exitctx(kthread_t *t) 1338 { 1339 if (t->t_ctx != NULL) { 1340 struct ctxop *ctx, *head; 1341 1342 /* Forward traversal */ 1343 ctx = head = t->t_ctx; 1344 do { 1345 if (ctx->exit_op != NULL) { 1346 (ctx->exit_op)(t); 1347 } 1348 ctx = ctx->next; 1349 } while (ctx != head); 1350 } 1351 } 1352 1353 /* 1354 * freectx is called from thread_free() and exec() to get 1355 * rid of old thread context ops. 1356 */ 1357 void 1358 freectx(kthread_t *t, int isexec) 1359 { 1360 kpreempt_disable(); 1361 if (t->t_ctx != NULL) { 1362 struct ctxop *ctx, *head; 1363 1364 ctx = head = t->t_ctx; 1365 t->t_ctx = NULL; 1366 do { 1367 struct ctxop *next = ctx->next; 1368 1369 if (ctx->free_op != NULL) { 1370 (ctx->free_op)(ctx->arg, isexec); 1371 } 1372 kmem_free(ctx, sizeof (struct ctxop)); 1373 ctx = next; 1374 } while (ctx != head); 1375 } 1376 kpreempt_enable(); 1377 } 1378 1379 /* 1380 * freectx_ctx is called from lwp_create() when lwp is reused from 1381 * lwp_deathrow and its thread structure is added to thread_deathrow. 
1382 * The thread structure to which this ctx was attached may be already 1383 * freed by the thread reaper so free_op implementations shouldn't rely 1384 * on thread structure to which this ctx was attached still being around. 1385 */ 1386 void 1387 freectx_ctx(struct ctxop *ctx) 1388 { 1389 struct ctxop *head = ctx; 1390 1391 ASSERT(ctx != NULL); 1392 1393 kpreempt_disable(); 1394 1395 head = ctx; 1396 do { 1397 struct ctxop *next = ctx->next; 1398 1399 if (ctx->free_op != NULL) { 1400 (ctx->free_op)(ctx->arg, 0); 1401 } 1402 kmem_free(ctx, sizeof (struct ctxop)); 1403 ctx = next; 1404 } while (ctx != head); 1405 kpreempt_enable(); 1406 } 1407 1408 /* 1409 * Set the thread running; arrange for it to be swapped in if necessary. 1410 */ 1411 void 1412 setrun_locked(kthread_t *t) 1413 { 1414 ASSERT(THREAD_LOCK_HELD(t)); 1415 if (t->t_state == TS_SLEEP) { 1416 /* 1417 * Take off sleep queue. 1418 */ 1419 SOBJ_UNSLEEP(t->t_sobj_ops, t); 1420 } else if (t->t_state & (TS_RUN | TS_ONPROC)) { 1421 /* 1422 * Already on dispatcher queue. 1423 */ 1424 return; 1425 } else if (t->t_state == TS_WAIT) { 1426 waitq_setrun(t); 1427 } else if (t->t_state == TS_STOPPED) { 1428 /* 1429 * All of the sending of SIGCONT (TC_XSTART) and /proc 1430 * (TC_PSTART) and lwp_continue() (TC_CSTART) must have 1431 * requested that the thread be run. 1432 * Just calling setrun() is not sufficient to set a stopped 1433 * thread running. TP_TXSTART is always set if the thread 1434 * is not stopped by a jobcontrol stop signal. 1435 * TP_TPSTART is always set if /proc is not controlling it. 1436 * TP_TCSTART is always set if lwp_suspend() didn't stop it. 1437 * The thread won't be stopped unless one of these 1438 * three mechanisms did it. 1439 * 1440 * These flags must be set before calling setrun_locked(t). 1441 * They can't be passed as arguments because the streams 1442 * code calls setrun() indirectly and the mechanism for 1443 * doing so admits only one argument. Note that the 1444 * thread must be locked in order to change t_schedflags. 1445 */ 1446 if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART) 1447 return; 1448 /* 1449 * Process is no longer stopped (a thread is running). 1450 */ 1451 t->t_whystop = 0; 1452 t->t_whatstop = 0; 1453 /* 1454 * Strictly speaking, we do not have to clear these 1455 * flags here; they are cleared on entry to stop(). 1456 * However, they are confusing when doing kernel 1457 * debugging or when they are revealed by ps(1). 1458 */ 1459 t->t_schedflag &= ~TS_ALLSTART; 1460 THREAD_TRANSITION(t); /* drop stopped-thread lock */ 1461 ASSERT(t->t_lockp == &transition_lock); 1462 ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL); 1463 /* 1464 * Let the class put the process on the dispatcher queue. 1465 */ 1466 CL_SETRUN(t); 1467 } 1468 } 1469 1470 void 1471 setrun(kthread_t *t) 1472 { 1473 thread_lock(t); 1474 setrun_locked(t); 1475 thread_unlock(t); 1476 } 1477 1478 /* 1479 * Unpin an interrupted thread. 1480 * When an interrupt occurs, the interrupt is handled on the stack 1481 * of an interrupt thread, taken from a pool linked to the CPU structure. 1482 * 1483 * When swtch() is switching away from an interrupt thread because it 1484 * blocked or was preempted, this routine is called to complete the 1485 * saving of the interrupted thread state, and returns the interrupted 1486 * thread pointer so it may be resumed. 1487 * 1488 * Called by swtch() only at high spl. 
1489 */ 1490 kthread_t * 1491 thread_unpin() 1492 { 1493 kthread_t *t = curthread; /* current thread */ 1494 kthread_t *itp; /* interrupted thread */ 1495 int i; /* interrupt level */ 1496 extern int intr_passivate(); 1497 1498 ASSERT(t->t_intr != NULL); 1499 1500 itp = t->t_intr; /* interrupted thread */ 1501 t->t_intr = NULL; /* clear interrupt ptr */ 1502 1503 smt_end_intr(); 1504 1505 /* 1506 * Get state from interrupt thread for the one 1507 * it interrupted. 1508 */ 1509 1510 i = intr_passivate(t, itp); 1511 1512 TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE, 1513 "intr_passivate:level %d curthread %p (%T) ithread %p (%T)", 1514 i, t, t, itp, itp); 1515 1516 /* 1517 * Dissociate the current thread from the interrupted thread's LWP. 1518 */ 1519 t->t_lwp = NULL; 1520 1521 /* 1522 * Interrupt handlers above the level that spinlocks block must 1523 * not block. 1524 */ 1525 #if DEBUG 1526 if (i < 0 || i > LOCK_LEVEL) 1527 cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i); 1528 #endif 1529 1530 /* 1531 * Compute the CPU's base interrupt level based on the active 1532 * interrupts. 1533 */ 1534 ASSERT(CPU->cpu_intr_actv & (1 << i)); 1535 set_base_spl(); 1536 1537 return (itp); 1538 } 1539 1540 /* 1541 * Create and initialize an interrupt thread. 1542 * Returns non-zero on error. 1543 * Called at spl7() or better. 1544 */ 1545 void 1546 thread_create_intr(struct cpu *cp) 1547 { 1548 kthread_t *tp; 1549 1550 tp = thread_create(NULL, 0, 1551 (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0); 1552 1553 /* 1554 * Set the thread in the TS_FREE state. The state will change 1555 * to TS_ONPROC only while the interrupt is active. Think of these 1556 * as being on a private free list for the CPU. Being TS_FREE keeps 1557 * inactive interrupt threads out of debugger thread lists. 1558 * 1559 * We cannot call thread_create with TS_FREE because of the current 1560 * checks there for ONPROC. Fix this when thread_create takes flags. 1561 */ 1562 THREAD_FREEINTR(tp, cp); 1563 1564 /* 1565 * Nobody should ever reference the credentials of an interrupt 1566 * thread so make it NULL to catch any such references. 1567 */ 1568 tp->t_cred = NULL; 1569 tp->t_flag |= T_INTR_THREAD; 1570 tp->t_cpu = cp; 1571 tp->t_bound_cpu = cp; 1572 tp->t_disp_queue = cp->cpu_disp; 1573 tp->t_affinitycnt = 1; 1574 tp->t_preempt = 1; 1575 1576 /* 1577 * Don't make a user-requested binding on this thread so that 1578 * the processor can be offlined. 1579 */ 1580 tp->t_bind_cpu = PBIND_NONE; /* no USER-requested binding */ 1581 tp->t_bind_pset = PS_NONE; 1582 1583 #if defined(__x86) 1584 tp->t_stk -= STACK_ALIGN; 1585 *(tp->t_stk) = 0; /* terminate intr thread stack */ 1586 #endif 1587 1588 /* 1589 * Link onto CPU's interrupt pool. 
1590 */ 1591 tp->t_link = cp->cpu_intr_thread; 1592 cp->cpu_intr_thread = tp; 1593 } 1594 1595 /* 1596 * TSD -- THREAD SPECIFIC DATA 1597 */ 1598 static kmutex_t tsd_mutex; /* linked list spin lock */ 1599 static uint_t tsd_nkeys; /* size of destructor array */ 1600 /* per-key destructor funcs */ 1601 static void (**tsd_destructor)(void *); 1602 /* list of tsd_thread's */ 1603 static struct tsd_thread *tsd_list; 1604 1605 /* 1606 * Default destructor 1607 * Needed because NULL destructor means that the key is unused 1608 */ 1609 /* ARGSUSED */ 1610 void 1611 tsd_defaultdestructor(void *value) 1612 {} 1613 1614 /* 1615 * Create a key (index into per thread array) 1616 * Locks out tsd_create, tsd_destroy, and tsd_exit 1617 * May allocate memory with lock held 1618 */ 1619 void 1620 tsd_create(uint_t *keyp, void (*destructor)(void *)) 1621 { 1622 int i; 1623 uint_t nkeys; 1624 1625 /* 1626 * if key is allocated, do nothing 1627 */ 1628 mutex_enter(&tsd_mutex); 1629 if (*keyp) { 1630 mutex_exit(&tsd_mutex); 1631 return; 1632 } 1633 /* 1634 * find an unused key 1635 */ 1636 if (destructor == NULL) 1637 destructor = tsd_defaultdestructor; 1638 1639 for (i = 0; i < tsd_nkeys; ++i) 1640 if (tsd_destructor[i] == NULL) 1641 break; 1642 1643 /* 1644 * if no unused keys, increase the size of the destructor array 1645 */ 1646 if (i == tsd_nkeys) { 1647 if ((nkeys = (tsd_nkeys << 1)) == 0) 1648 nkeys = 1; 1649 tsd_destructor = 1650 (void (**)(void *))tsd_realloc((void *)tsd_destructor, 1651 (size_t)(tsd_nkeys * sizeof (void (*)(void *))), 1652 (size_t)(nkeys * sizeof (void (*)(void *)))); 1653 tsd_nkeys = nkeys; 1654 } 1655 1656 /* 1657 * allocate the next available unused key 1658 */ 1659 tsd_destructor[i] = destructor; 1660 *keyp = i + 1; 1661 mutex_exit(&tsd_mutex); 1662 } 1663 1664 /* 1665 * Destroy a key -- this is for unloadable modules 1666 * 1667 * Assumes that the caller is preventing tsd_set and tsd_get 1668 * Locks out tsd_create, tsd_destroy, and tsd_exit 1669 * May free memory with lock held 1670 */ 1671 void 1672 tsd_destroy(uint_t *keyp) 1673 { 1674 uint_t key; 1675 struct tsd_thread *tsd; 1676 1677 /* 1678 * protect the key namespace and our destructor lists 1679 */ 1680 mutex_enter(&tsd_mutex); 1681 key = *keyp; 1682 *keyp = 0; 1683 1684 ASSERT(key <= tsd_nkeys); 1685 1686 /* 1687 * if the key is valid 1688 */ 1689 if (key != 0) { 1690 uint_t k = key - 1; 1691 /* 1692 * for every thread with TSD, call key's destructor 1693 */ 1694 for (tsd = tsd_list; tsd; tsd = tsd->ts_next) { 1695 /* 1696 * no TSD for key in this thread 1697 */ 1698 if (key > tsd->ts_nkeys) 1699 continue; 1700 /* 1701 * call destructor for key 1702 */ 1703 if (tsd->ts_value[k] && tsd_destructor[k]) 1704 (*tsd_destructor[k])(tsd->ts_value[k]); 1705 /* 1706 * reset value for key 1707 */ 1708 tsd->ts_value[k] = NULL; 1709 } 1710 /* 1711 * actually free the key (NULL destructor == unused) 1712 */ 1713 tsd_destructor[k] = NULL; 1714 } 1715 1716 mutex_exit(&tsd_mutex); 1717 } 1718 1719 /* 1720 * Quickly return the per thread value that was stored with the specified key 1721 * Assumes the caller is protecting key from tsd_create and tsd_destroy 1722 */ 1723 void * 1724 tsd_get(uint_t key) 1725 { 1726 return (tsd_agent_get(curthread, key)); 1727 } 1728 1729 /* 1730 * Set a per thread value indexed with the specified key 1731 */ 1732 int 1733 tsd_set(uint_t key, void *value) 1734 { 1735 return (tsd_agent_set(curthread, key, value)); 1736 } 1737 1738 /* 1739 * Like tsd_get(), except that the agent lwp can get the tsd of 1740 * 

/*
 * Like tsd_get(), except that the agent lwp can get the tsd of
 * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or syslwp is creating a new lwp.
 */
void *
tsd_agent_get(kthread_t *t, uint_t key)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key && tsd != NULL && key <= tsd->ts_nkeys)
		return (tsd->ts_value[key - 1]);
	return (NULL);
}

/*
 * Like tsd_set(), except that the agent lwp can set the tsd of
 * another thread in the same process, or syslwp can set the tsd
 * of a thread it's in the middle of creating.
 *
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 * May lock out tsd_destroy (and tsd_create), may allocate memory with
 * lock held
 */
int
tsd_agent_set(kthread_t *t, uint_t key, void *value)
{
	struct tsd_thread *tsd = t->t_tsd;

	ASSERT(t == curthread ||
	    ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

	if (key == 0)
		return (EINVAL);
	if (tsd == NULL)
		tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key <= tsd->ts_nkeys) {
		tsd->ts_value[key - 1] = value;
		return (0);
	}

	ASSERT(key <= tsd_nkeys);

	/*
	 * lock out tsd_destroy()
	 */
	mutex_enter(&tsd_mutex);
	if (tsd->ts_nkeys == 0) {
		/*
		 * Link onto list of threads with TSD
		 */
		if ((tsd->ts_next = tsd_list) != NULL)
			tsd_list->ts_prev = tsd;
		tsd_list = tsd;
	}

	/*
	 * Allocate thread local storage and set the value for key
	 */
	tsd->ts_value = tsd_realloc(tsd->ts_value,
	    tsd->ts_nkeys * sizeof (void *),
	    key * sizeof (void *));
	tsd->ts_nkeys = key;
	tsd->ts_value[key - 1] = value;
	mutex_exit(&tsd_mutex);

	return (0);
}


/*
 * Return the per thread value that was stored with the specified key
 *	If necessary, create the key and the value
 *	Assumes the caller is protecting *keyp from tsd_destroy
 */
void *
tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
{
	void *value;
	uint_t key = *keyp;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
	if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
		return (value);
	if (key == 0)
		tsd_create(keyp, destroy);
	(void) tsd_set(*keyp, value = (*allocate)());

	return (value);
}

/*
 * Called from thread_exit() to run the destructor function for each tsd
 *	Locks out tsd_create and tsd_destroy
 *	Assumes that the destructor *DOES NOT* use tsd
 */
void
tsd_exit(void)
{
	int i;
	struct tsd_thread *tsd = curthread->t_tsd;

	if (tsd == NULL)
		return;

	if (tsd->ts_nkeys == 0) {
		kmem_free(tsd, sizeof (*tsd));
		curthread->t_tsd = NULL;
		return;
	}

	/*
	 * lock out tsd_create and tsd_destroy, call
	 * the destructor, and mark the value as destroyed.
1857 */ 1858 mutex_enter(&tsd_mutex); 1859 1860 for (i = 0; i < tsd->ts_nkeys; i++) { 1861 if (tsd->ts_value[i] && tsd_destructor[i]) 1862 (*tsd_destructor[i])(tsd->ts_value[i]); 1863 tsd->ts_value[i] = NULL; 1864 } 1865 1866 /* 1867 * remove from linked list of threads with TSD 1868 */ 1869 if (tsd->ts_next) 1870 tsd->ts_next->ts_prev = tsd->ts_prev; 1871 if (tsd->ts_prev) 1872 tsd->ts_prev->ts_next = tsd->ts_next; 1873 if (tsd_list == tsd) 1874 tsd_list = tsd->ts_next; 1875 1876 mutex_exit(&tsd_mutex); 1877 1878 /* 1879 * free up the TSD 1880 */ 1881 kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *)); 1882 kmem_free(tsd, sizeof (struct tsd_thread)); 1883 curthread->t_tsd = NULL; 1884 } 1885 1886 /* 1887 * realloc 1888 */ 1889 static void * 1890 tsd_realloc(void *old, size_t osize, size_t nsize) 1891 { 1892 void *new; 1893 1894 new = kmem_zalloc(nsize, KM_SLEEP); 1895 if (old) { 1896 bcopy(old, new, osize); 1897 kmem_free(old, osize); 1898 } 1899 return (new); 1900 } 1901 1902 /* 1903 * Return non-zero if an interrupt is being serviced. 1904 */ 1905 int 1906 servicing_interrupt() 1907 { 1908 int onintr = 0; 1909 1910 /* Are we an interrupt thread */ 1911 if (curthread->t_flag & T_INTR_THREAD) 1912 return (1); 1913 /* Are we servicing a high level interrupt? */ 1914 if (CPU_ON_INTR(CPU)) { 1915 kpreempt_disable(); 1916 onintr = CPU_ON_INTR(CPU); 1917 kpreempt_enable(); 1918 } 1919 return (onintr); 1920 } 1921 1922 1923 /* 1924 * Change the dispatch priority of a thread in the system. 1925 * Used when raising or lowering a thread's priority. 1926 * (E.g., priority inheritance) 1927 * 1928 * Since threads are queued according to their priority, we 1929 * we must check the thread's state to determine whether it 1930 * is on a queue somewhere. If it is, we've got to: 1931 * 1932 * o Dequeue the thread. 1933 * o Change its effective priority. 1934 * o Enqueue the thread. 1935 * 1936 * Assumptions: The thread whose priority we wish to change 1937 * must be locked before we call thread_change_(e)pri(). 1938 * The thread_change(e)pri() function doesn't drop the thread 1939 * lock--that must be done by its caller. 1940 */ 1941 void 1942 thread_change_epri(kthread_t *t, pri_t disp_pri) 1943 { 1944 uint_t state; 1945 1946 ASSERT(THREAD_LOCK_HELD(t)); 1947 1948 /* 1949 * If the inherited priority hasn't actually changed, 1950 * just return. 1951 */ 1952 if (t->t_epri == disp_pri) 1953 return; 1954 1955 state = t->t_state; 1956 1957 /* 1958 * If it's not on a queue, change the priority with impunity. 1959 */ 1960 if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) { 1961 t->t_epri = disp_pri; 1962 if (state == TS_ONPROC) { 1963 cpu_t *cp = t->t_disp_queue->disp_cpu; 1964 1965 if (t == cp->cpu_dispthread) 1966 cp->cpu_dispatch_pri = DISP_PRIO(t); 1967 } 1968 } else if (state == TS_SLEEP) { 1969 /* 1970 * Take the thread out of its sleep queue. 1971 * Change the inherited priority. 1972 * Re-enqueue the thread. 1973 * Each synchronization object exports a function 1974 * to do this in an appropriate manner. 1975 */ 1976 SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri); 1977 } else if (state == TS_WAIT) { 1978 /* 1979 * Re-enqueue a thread on the wait queue if its 1980 * effective priority needs to change. 1981 */ 1982 if (disp_pri != t->t_epri) 1983 waitq_change_pri(t, disp_pri); 1984 } else { 1985 /* 1986 * The thread is on a run queue. 1987 * Note: setbackdq() may not put the thread 1988 * back on the same run queue where it originally 1989 * resided. 
1990 */ 1991 (void) dispdeq(t); 1992 t->t_epri = disp_pri; 1993 setbackdq(t); 1994 } 1995 schedctl_set_cidpri(t); 1996 } 1997 1998 /* 1999 * Function: Change the t_pri field of a thread. 2000 * Side Effects: Adjust the thread ordering on a run queue 2001 * or sleep queue, if necessary. 2002 * Returns: 1 if the thread was on a run queue, else 0. 2003 */ 2004 int 2005 thread_change_pri(kthread_t *t, pri_t disp_pri, int front) 2006 { 2007 uint_t state; 2008 int on_rq = 0; 2009 2010 ASSERT(THREAD_LOCK_HELD(t)); 2011 2012 state = t->t_state; 2013 THREAD_WILLCHANGE_PRI(t, disp_pri); 2014 2015 /* 2016 * If it's not on a queue, change the priority with impunity. 2017 */ 2018 if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) { 2019 t->t_pri = disp_pri; 2020 2021 if (state == TS_ONPROC) { 2022 cpu_t *cp = t->t_disp_queue->disp_cpu; 2023 2024 if (t == cp->cpu_dispthread) 2025 cp->cpu_dispatch_pri = DISP_PRIO(t); 2026 } 2027 } else if (state == TS_SLEEP) { 2028 /* 2029 * If the priority has changed, take the thread out of 2030 * its sleep queue and change the priority. 2031 * Re-enqueue the thread. 2032 * Each synchronization object exports a function 2033 * to do this in an appropriate manner. 2034 */ 2035 if (disp_pri != t->t_pri) 2036 SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri); 2037 } else if (state == TS_WAIT) { 2038 /* 2039 * Re-enqueue a thread on the wait queue if its 2040 * priority needs to change. 2041 */ 2042 if (disp_pri != t->t_pri) 2043 waitq_change_pri(t, disp_pri); 2044 } else { 2045 /* 2046 * The thread is on a run queue. 2047 * Note: setbackdq() may not put the thread 2048 * back on the same run queue where it originally 2049 * resided. 2050 * 2051 * We still requeue the thread even if the priority 2052 * is unchanged to preserve round-robin (and other) 2053 * effects between threads of the same priority. 2054 */ 2055 on_rq = dispdeq(t); 2056 ASSERT(on_rq); 2057 t->t_pri = disp_pri; 2058 if (front) { 2059 setfrontdq(t); 2060 } else { 2061 setbackdq(t); 2062 } 2063 } 2064 schedctl_set_cidpri(t); 2065 return (on_rq); 2066 } 2067 2068 /* 2069 * Tunable kmem_stackinfo is set, fill the kernel thread stack with a 2070 * specific pattern. 2071 */ 2072 static void 2073 stkinfo_begin(kthread_t *t) 2074 { 2075 caddr_t start; /* stack start */ 2076 caddr_t end; /* stack end */ 2077 uint64_t *ptr; /* pattern pointer */ 2078 2079 /* 2080 * Stack grows up or down, see thread_create(), 2081 * compute stack memory area start and end (start < end). 2082 */ 2083 if (t->t_stk > t->t_stkbase) { 2084 /* stack grows down */ 2085 start = t->t_stkbase; 2086 end = t->t_stk; 2087 } else { 2088 /* stack grows up */ 2089 start = t->t_stk; 2090 end = t->t_stkbase; 2091 } 2092 2093 /* 2094 * Stackinfo pattern size is 8 bytes. Ensure proper 8 bytes 2095 * alignement for start and end in stack area boundaries 2096 * (protection against corrupt t_stkbase/t_stk data). 
/*
 * When the tunable kmem_stackinfo is set, fill the kernel thread stack
 * with a specific pattern.
 */
static void
stkinfo_begin(kthread_t *t)
{
	caddr_t	start;		/* stack start */
	caddr_t	end;		/* stack end */
	uint64_t *ptr;		/* pattern pointer */

	/*
	 * Stack grows up or down, see thread_create();
	 * compute stack memory area start and end (start < end).
	 */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
		start = t->t_stkbase;
		end = t->t_stk;
	} else {
		/* stack grows up */
		start = t->t_stk;
		end = t->t_stkbase;
	}

	/*
	 * The stackinfo pattern size is 8 bytes. Ensure proper 8-byte
	 * alignment for start and end within the stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
	 */
	if ((((uintptr_t)start) & 0x7) != 0) {
		start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
	}
	end = (caddr_t)(((uintptr_t)end) & (~0x7));

	if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative size or stack size > 1 MB, assume bogus */
		return;
	}

	/* fill stack area with a pattern (instead of zeros) */
	ptr = (uint64_t *)((void *)start);
	while (ptr < (uint64_t *)((void *)end)) {
		*ptr++ = KMEM_STKINFO_PATTERN;
	}
}

/*
 * When the tunable kmem_stackinfo is set, create the stackinfo log if it
 * doesn't already exist, compute the percentage of kernel stack actually
 * used, and record it in the log if it is among the highest usages seen
 * so far.
 */
static void
stkinfo_end(kthread_t *t)
{
	caddr_t	start;		/* stack start */
	caddr_t	end;		/* stack end */
	uint64_t *ptr;		/* pattern pointer */
	size_t stksz;		/* stack size */
	size_t smallest = 0;
	size_t percent = 0;
	uint_t index = 0;
	uint_t i;
	static size_t smallest_percent = (size_t)-1;
	static uint_t full = 0;

	/* create the stackinfo log, if it doesn't already exist */
	mutex_enter(&kmem_stkinfo_lock);
	if (kmem_stkinfo_log == NULL) {
		kmem_stkinfo_log = (kmem_stkinfo_t *)
		    kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
		    (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
		if (kmem_stkinfo_log == NULL) {
			mutex_exit(&kmem_stkinfo_lock);
			return;
		}
	}
	mutex_exit(&kmem_stkinfo_lock);

	/*
	 * Stack grows up or down, see thread_create();
	 * compute stack memory area start and end (start < end).
	 */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
		start = t->t_stkbase;
		end = t->t_stk;
	} else {
		/* stack grows up */
		start = t->t_stk;
		end = t->t_stkbase;
	}

	/* stack size as found in kthread_t */
	stksz = end - start;

	/*
	 * The stackinfo pattern size is 8 bytes. Ensure proper 8-byte
	 * alignment for start and end within the stack area boundaries
	 * (protection against corrupt t_stkbase/t_stk data).
	 */
	if ((((uintptr_t)start) & 0x7) != 0) {
		start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
	}
	end = (caddr_t)(((uintptr_t)end) & (~0x7));

	if ((end <= start) || (end - start) > (1024 * 1024)) {
		/* negative size or stack size > 1 MB, assume bogus */
		return;
	}

	/* search until no pattern in the stack */
	if (t->t_stk > t->t_stkbase) {
		/* stack grows down */
#if defined(__x86)
		/*
		 * 6 longs are pushed on the stack, see thread_load(). Skip
		 * them, so if the kthread has never run, percent is zero.
		 * 8-byte alignment is preserved for a 32-bit kernel:
		 * 6 x 4 = 24, and 24 is a multiple of 8.
		 */
		end -= (6 * sizeof (long));
#endif
		ptr = (uint64_t *)((void *)start);
		while (ptr < (uint64_t *)((void *)end)) {
			if (*ptr != KMEM_STKINFO_PATTERN) {
				percent = stkinfo_percent(end,
				    start, (caddr_t)ptr);
				break;
			}
			ptr++;
		}
	} else {
		/* stack grows up */
		ptr = (uint64_t *)((void *)end);
		ptr--;
		while (ptr >= (uint64_t *)((void *)start)) {
			if (*ptr != KMEM_STKINFO_PATTERN) {
				percent = stkinfo_percent(start,
				    end, (caddr_t)ptr);
				break;
			}
			ptr--;
		}
	}

	DTRACE_PROBE3(stack__usage, kthread_t *, t,
	    size_t, stksz, size_t, percent);

	if (percent == 0) {
		return;
	}

	mutex_enter(&kmem_stkinfo_lock);
	if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) {
		/*
		 * The log is full and already contains the highest values.
		 */
		mutex_exit(&kmem_stkinfo_lock);
		return;
	}

	/* keep a log of the highest stack usages */
	for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) {
		if (kmem_stkinfo_log[i].percent == 0) {
			index = i;
			full++;
			break;
		}
		if (smallest == 0) {
			smallest = kmem_stkinfo_log[i].percent;
			index = i;
			continue;
		}
		if (kmem_stkinfo_log[i].percent < smallest) {
			smallest = kmem_stkinfo_log[i].percent;
			index = i;
		}
	}

	if (percent >= kmem_stkinfo_log[index].percent) {
		kmem_stkinfo_log[index].kthread = (caddr_t)t;
		kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
		kmem_stkinfo_log[index].start = start;
		kmem_stkinfo_log[index].stksz = stksz;
		kmem_stkinfo_log[index].percent = percent;
		kmem_stkinfo_log[index].t_tid = t->t_tid;
		kmem_stkinfo_log[index].cmd[0] = '\0';
		if (t->t_tid != 0) {
			stksz = strlen((t->t_procp)->p_user.u_comm);
			if (stksz >= KMEM_STKINFO_STR_SIZE) {
				stksz = KMEM_STKINFO_STR_SIZE - 1;
				kmem_stkinfo_log[index].cmd[stksz] = '\0';
			} else {
				stksz += 1;
			}
			(void) memcpy(kmem_stkinfo_log[index].cmd,
			    (t->t_procp)->p_user.u_comm, stksz);
		}
		if (percent < smallest_percent) {
			smallest_percent = percent;
		}
	}
	mutex_exit(&kmem_stkinfo_lock);
}

/*
 * When the tunable kmem_stackinfo is set, compute the stack utilization
 * percentage.
 */
static size_t
stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp)
{
	size_t percent;
	size_t s;

	if (t_stk > t_stkbase) {
		/* stack grows down */
		if (sp > t_stk) {
			return (0);
		}
		if (sp < t_stkbase) {
			return (100);
		}
		percent = t_stk - sp + 1;
		s = t_stk - t_stkbase + 1;
	} else {
		/* stack grows up */
		if (sp < t_stk) {
			return (0);
		}
		if (sp > t_stkbase) {
			return (100);
		}
		percent = sp - t_stk + 1;
		s = t_stkbase - t_stk + 1;
	}
	percent = ((100 * percent) / s) + 1;
	if (percent > 100) {
		percent = 100;
	}
	return (percent);
}
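/*
 * Worked example for stkinfo_percent() (illustrative only; the figures
 * are hypothetical): for a downward-growing 16K stack,
 * s = t_stk - t_stkbase + 1 = 16385.  If the deepest word no longer
 * holding KMEM_STKINFO_PATTERN sits 4096 bytes below t_stk, then
 * percent = 4097 bytes, and
 *
 *	((100 * 4097) / 16385) + 1 = 25 + 1 = 26
 *
 * so roughly 26% of the stack is reported as used.  The trailing +1
 * rounds up so that any usage at all reports at least 1%, and the
 * result is clamped to 100.
 */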
/*
 * NOTE: This will silently truncate a name longer than THREAD_NAME_MAX - 1
 * characters.  It is expected that callers (acting on behalf of userland
 * clients) will perform any required checks to return the correct error
 * semantics.  It is also expected that callers acting on behalf of userland
 * clients have performed any necessary permission checks.
 */
int
thread_setname(kthread_t *t, const char *name)
{
	char *buf = NULL;

	/*
	 * We optimistically assume that a thread's name will only be set
	 * once, and so allocate memory in preparation for setting t_name.
	 * If it turns out a name has already been set, we just discard
	 * (free) the buffer we just allocated and reuse the current buffer
	 * (as all buffers should be THREAD_NAME_MAX large).
	 *
	 * Such an arrangement means that over the lifetime of a kthread_t,
	 * t_name is either NULL or has one value (the address of the buffer
	 * holding the current thread name).  The assumption is that most
	 * kthread_t instances will not have a name assigned, so dynamically
	 * allocating the memory should minimize the footprint of this
	 * feature, while having the buffer persist for the life of the
	 * thread simplifies usage in highly constrained situations
	 * (e.g. dtrace).
	 */
	if (name != NULL && name[0] != '\0') {
		for (size_t i = 0; name[i] != '\0'; i++) {
			if (!isprint(name[i]))
				return (EINVAL);
		}

		buf = kmem_zalloc(THREAD_NAME_MAX, KM_SLEEP);
		(void) strlcpy(buf, name, THREAD_NAME_MAX);
	}

	mutex_enter(&ttoproc(t)->p_lock);
	if (t->t_name == NULL) {
		t->t_name = buf;
	} else {
		if (buf != NULL) {
			(void) strlcpy(t->t_name, name, THREAD_NAME_MAX);
			kmem_free(buf, THREAD_NAME_MAX);
		} else {
			bzero(t->t_name, THREAD_NAME_MAX);
		}
	}
	mutex_exit(&ttoproc(t)->p_lock);
	return (0);
}

int
thread_vsetname(kthread_t *t, const char *fmt, ...)
{
	char name[THREAD_NAME_MAX];
	va_list va;
	int rc;

	va_start(va, fmt);
	rc = vsnprintf(name, sizeof (name), fmt, va);
	va_end(va);

	if (rc < 0)
		return (EINVAL);

	if (rc >= sizeof (name))
		return (ENAMETOOLONG);

	return (thread_setname(t, name));
}
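/*
 * Illustrative usage sketch (not part of the original source): a driver
 * or taskq worker might name its threads by instance, for example
 *
 *	(void) thread_vsetname(curthread, "mydrv_worker_%d", instance);
 *
 * where "mydrv_worker_%d" and instance are hypothetical.  A formatted
 * name that would exceed THREAD_NAME_MAX - 1 characters makes
 * thread_vsetname() fail with ENAMETOOLONG, whereas thread_setname()
 * silently truncates an overlong name.
 */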