/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/pcb.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/resource.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/machlock.h>
#include <sys/kmem.h>
#include <sys/varargs.h>
#include <sys/turnstile.h>
#include <sys/poll.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <c2/audit.h>
#include <sys/tnf.h>
#include <sys/sobject.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/door.h>
#include <sys/spl.h>
#include <sys/copyops.h>
#include <sys/rctl.h>
#include <sys/brand.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <sys/cpc_impl.h>
#include <sys/sdt.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/schedctl.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <sys/kiconv.h>

struct kmem_cache *thread_cache;	/* cache of free threads */
struct kmem_cache *lwp_cache;		/* cache of free lwps */
struct kmem_cache *turnstile_cache;	/* cache of free turnstiles */

/*
 * allthreads is only for use by kmem_readers.  All kernel loops can use
 * the current thread as a start/end point.
 */
static kthread_t *allthreads = &t0;	/* circular list of all threads */

static kcondvar_t reaper_cv;		/* synchronization var */
kthread_t *thread_deathrow;		/* circular list of reapable threads */
kthread_t *lwp_deathrow;		/* circular list of reapable threads */
kmutex_t reaplock;			/* protects lwp and thread deathrows */
int thread_reapcnt = 0;			/* number of threads on deathrow */
int lwp_reapcnt = 0;			/* number of lwps on deathrow */
int reaplimit = 16;			/* delay reaping until reaplimit */

thread_free_lock_t *thread_free_lock;
					/* protects tick thread from reaper */

extern int nthread;

id_t syscid;				/* system scheduling class ID */
void *segkp_thread;			/* cookie for segkp pool */

int lwp_cache_sz = 32;
int t_cache_sz = 8;
static kt_did_t next_t_id = 1;

/* Default mode for thread binding to CPUs and processor sets */
int default_binding_mode = TB_ALLHARD;

/*
 * Min/Max stack sizes for stack size parameters
 */
#define	MAX_STKSIZE	(32 * DEFAULTSTKSZ)
#define	MIN_STKSIZE	DEFAULTSTKSZ

/*
 * default_stksize overrides lwp_default_stksize if it is set.
 */
int default_stksize;
int lwp_default_stksize;

static zone_key_t zone_thread_key;

unsigned int kmem_stackinfo;		/* stackinfo feature on-off */
kmem_stkinfo_t *kmem_stkinfo_log;	/* stackinfo circular log */
static kmutex_t kmem_stkinfo_lock;	/* protects kmem_stkinfo_log */

/*
 * forward declarations for internal thread specific data (tsd)
 */
static void *tsd_realloc(void *, size_t, size_t);

void thread_reaper(void);

/* forward declarations for stackinfo feature */
static void stkinfo_begin(kthread_t *);
static void stkinfo_end(kthread_t *);
static size_t stkinfo_percent(caddr_t, caddr_t, caddr_t);

/*ARGSUSED*/
static int
turnstile_constructor(void *buf, void *cdrarg, int kmflags)
{
    bzero(buf, sizeof (turnstile_t));
    return (0);
}

/*ARGSUSED*/
static void
turnstile_destructor(void *buf, void *cdrarg)
{
    turnstile_t *ts = buf;

    ASSERT(ts->ts_free == NULL);
    ASSERT(ts->ts_waiters == 0);
    ASSERT(ts->ts_inheritor == NULL);
    ASSERT(ts->ts_sleepq[0].sq_first == NULL);
    ASSERT(ts->ts_sleepq[1].sq_first == NULL);
}

void
thread_init(void)
{
    kthread_t *tp;
    extern char sys_name[];
    extern void idle();
    struct cpu *cpu = CPU;
    int i;
    kmutex_t *lp;

    mutex_init(&reaplock, NULL, MUTEX_SPIN, (void *)ipltospl(DISP_LEVEL));
    thread_free_lock =
        kmem_alloc(sizeof (thread_free_lock_t) * THREAD_FREE_NUM, KM_SLEEP);
    for (i = 0; i < THREAD_FREE_NUM; i++) {
        lp = &thread_free_lock[i].tf_lock;
        mutex_init(lp, NULL, MUTEX_DEFAULT, NULL);
    }

#if defined(__i386) || defined(__amd64)
    thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
        PTR24_ALIGN, NULL, NULL, NULL, NULL, NULL, 0);

    /*
     * "struct _klwp" includes a "struct pcb", which includes a
     * "struct fpu", which needs to be 16-byte aligned on amd64
     * (and even on i386 for fxsave/fxrstor).
     */
    lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
        16, NULL, NULL, NULL, NULL, NULL, 0);
#else
    /*
     * Allocate thread structures from static_arena.  This prevents
     * issues where a thread tries to relocate its own thread
     * structure and touches it after the mapping has been suspended.
     */
    thread_cache = kmem_cache_create("thread_cache", sizeof (kthread_t),
        PTR24_ALIGN, NULL, NULL, NULL, NULL, static_arena, 0);

    lwp_stk_cache_init();

    lwp_cache = kmem_cache_create("lwp_cache", sizeof (klwp_t),
        0, NULL, NULL, NULL, NULL, NULL, 0);
#endif

    turnstile_cache = kmem_cache_create("turnstile_cache",
        sizeof (turnstile_t), 0,
        turnstile_constructor, turnstile_destructor, NULL, NULL, NULL, 0);

    label_init();
    cred_init();

    /*
     * Initialize various resource management facilities.
     */
    rctl_init();
    cpucaps_init();
    /*
     * zone_init() should be called before project_init() so that the
     * project ID for the first project is initialized correctly.
     */
    zone_init();
    project_init();
    brand_init();
    kiconv_init();
    task_init();
    tcache_init();
    pool_init();

    curthread->t_ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

    /*
     * Originally, we had two parameters to set default stack
     * size: one for lwp's (lwp_default_stksize), and one for
     * kernel-only threads (DEFAULTSTKSZ, a.k.a. _defaultstksz).
     * Now we have a third parameter that overrides both if it is
     * set to a legal stack size, called default_stksize.
     */

    if (default_stksize == 0) {
        default_stksize = DEFAULTSTKSZ;
    } else if (default_stksize % PAGESIZE != 0 ||
        default_stksize > MAX_STKSIZE ||
        default_stksize < MIN_STKSIZE) {
        cmn_err(CE_WARN, "Illegal stack size. Using %d",
            (int)DEFAULTSTKSZ);
        default_stksize = DEFAULTSTKSZ;
    } else {
        lwp_default_stksize = default_stksize;
    }

    if (lwp_default_stksize == 0) {
        lwp_default_stksize = default_stksize;
    } else if (lwp_default_stksize % PAGESIZE != 0 ||
        lwp_default_stksize > MAX_STKSIZE ||
        lwp_default_stksize < MIN_STKSIZE) {
        cmn_err(CE_WARN, "Illegal stack size. Using %d",
            default_stksize);
        lwp_default_stksize = default_stksize;
    }

    segkp_lwp = segkp_cache_init(segkp, lwp_cache_sz,
        lwp_default_stksize,
        (KPD_NOWAIT | KPD_HASREDZONE | KPD_LOCKED));

    segkp_thread = segkp_cache_init(segkp, t_cache_sz,
        default_stksize, KPD_HASREDZONE | KPD_LOCKED | KPD_NO_ANON);

    (void) getcid(sys_name, &syscid);
    curthread->t_cid = syscid;	/* current thread is t0 */

    /*
     * Set up the first CPU's idle thread.
     * It runs whenever the CPU has nothing worthwhile to do.
     */
    tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_STOPPED, -1);
    cpu->cpu_idle_thread = tp;
    tp->t_preempt = 1;
    tp->t_disp_queue = cpu->cpu_disp;
    ASSERT(tp->t_disp_queue != NULL);
    tp->t_bound_cpu = cpu;
    tp->t_affinitycnt = 1;

    /*
     * Registering a thread in the callback table is usually
     * done in the initialization code of the thread.  In this
     * case, we do it right after thread creation to avoid
     * blocking the idle thread while registering itself.  It also
     * avoids the possibility of reregistration in case a CPU
     * restarts its idle thread.
     */
    CALLB_CPR_INIT_SAFE(tp, "idle");

    /*
     * Create the thread_reaper daemon.  From this point on, exited
     * threads will get reaped.
     */
    (void) thread_create(NULL, 0, (void (*)())thread_reaper,
        NULL, 0, &p0, TS_RUN, minclsyspri);

    /*
     * Finish initializing the kernel memory allocator now that
     * thread_create() is available.
     */
    kmem_thread_init();

    if (boothowto & RB_DEBUG)
        kdi_dvec_thravail();
}

/*
 * Create a thread.
 *
 * thread_create() blocks for memory if necessary.  It never fails.
 *
 * If stk is NULL, the thread is created at the base of the stack
 * and cannot be swapped.
 */
kthread_t *
thread_create(
    caddr_t	stk,
    size_t	stksize,
    void	(*proc)(),
    void	*arg,
    size_t	len,
    proc_t	*pp,
    int	state,
    pri_t	pri)
{
    kthread_t *t;
    extern struct classfuncs sys_classfuncs;
    turnstile_t *ts;

    /*
     * Every thread keeps a turnstile around in case it needs to block.
     * The only reason the turnstile is not simply part of the thread
     * structure is that we may have to break the association whenever
     * more than one thread blocks on a given synchronization object.
     * From a memory-management standpoint, turnstiles are like the
     * "attached mblks" that hang off dblks in the streams allocator.
     */
    ts = kmem_cache_alloc(turnstile_cache, KM_SLEEP);

    if (stk == NULL) {
        /*
         * alloc both thread and stack in segkp chunk
         */

        if (stksize < default_stksize)
            stksize = default_stksize;

        if (stksize == default_stksize) {
            stk = (caddr_t)segkp_cache_get(segkp_thread);
        } else {
            stksize = roundup(stksize, PAGESIZE);
            stk = (caddr_t)segkp_get(segkp, stksize,
                (KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED));
        }

        ASSERT(stk != NULL);

        /*
         * The machine-dependent mutex code may require that
         * thread pointers (since they may be used for mutex owner
         * fields) have certain alignment requirements.
         * PTR24_ALIGN is the size of the alignment quanta.
         * XXX - assumes stack grows toward low addresses.
         */
        if (stksize <= sizeof (kthread_t) + PTR24_ALIGN)
            cmn_err(CE_PANIC, "thread_create: proposed stack size"
                " too small to hold thread.");
#ifdef STACK_GROWTH_DOWN
        stksize -= SA(sizeof (kthread_t) + PTR24_ALIGN - 1);
        stksize &= -PTR24_ALIGN;	/* make thread aligned */
        t = (kthread_t *)(stk + stksize);
        bzero(t, sizeof (kthread_t));
        if (audit_active)
            audit_thread_create(t);
        t->t_stk = stk + stksize;
        t->t_stkbase = stk;
#else	/* stack grows to larger addresses */
        stksize -= SA(sizeof (kthread_t));
        t = (kthread_t *)(stk);
        bzero(t, sizeof (kthread_t));
        t->t_stk = stk + sizeof (kthread_t);
        t->t_stkbase = stk + stksize + sizeof (kthread_t);
#endif	/* STACK_GROWTH_DOWN */
        t->t_flag |= T_TALLOCSTK;
        t->t_swap = stk;
    } else {
        t = kmem_cache_alloc(thread_cache, KM_SLEEP);
        bzero(t, sizeof (kthread_t));
        ASSERT(((uintptr_t)t & (PTR24_ALIGN - 1)) == 0);
        if (audit_active)
            audit_thread_create(t);
        /*
         * Initialize t_stk to the kernel stack pointer to use
         * upon entry to the kernel
         */
#ifdef STACK_GROWTH_DOWN
        t->t_stk = stk + stksize;
        t->t_stkbase = stk;
#else
        t->t_stk = stk;			/* 3b2-like */
        t->t_stkbase = stk + stksize;
#endif	/* STACK_GROWTH_DOWN */
    }

    if (kmem_stackinfo != 0) {
        stkinfo_begin(t);
    }

    /* set default stack flag */
    if (stksize == lwp_default_stksize)
        t->t_flag |= T_DFLTSTK;

    t->t_ts = ts;

    /*
     * p_cred could be NULL if thread_create is called before cred_init
     * is called in main.
     */
    mutex_enter(&pp->p_crlock);
    if (pp->p_cred)
        crhold(t->t_cred = pp->p_cred);
    mutex_exit(&pp->p_crlock);
    t->t_start = gethrestime_sec();
    t->t_startpc = proc;
    t->t_procp = pp;
    t->t_clfuncs = &sys_classfuncs.thread;
    t->t_cid = syscid;
    t->t_pri = pri;
    t->t_stime = lbolt;
    t->t_schedflag = TS_LOAD | TS_DONT_SWAP;
    t->t_bind_cpu = PBIND_NONE;
    t->t_bindflag = (uchar_t)default_binding_mode;
    t->t_bind_pset = PS_NONE;
    t->t_plockp = &pp->p_lock;
    t->t_copyops = NULL;
    t->t_taskq = NULL;
    t->t_anttime = 0;
    t->t_hatdepth = 0;

    t->t_dtrace_vtime = 1;	/* assure vtimestamp is always non-zero */

    CPU_STATS_ADDQ(CPU, sys, nthreads, 1);
#ifndef NPROBE
    /* Kernel probe */
    tnf_thread_create(t);
#endif /* NPROBE */
    LOCK_INIT_CLEAR(&t->t_lock);

    /*
     * Callers who give us a NULL proc must do their own
     * stack initialization.  e.g. lwp_create()
     */
    if (proc != NULL) {
        t->t_stk = thread_stk_init(t->t_stk);
        thread_load(t, proc, arg, len);
    }

    /*
     * Put a hold on project0.  If this thread is actually in a
     * different project, then t_proj will be changed later in
     * lwp_create().  All kernel-only threads must be in project 0.
     */
    t->t_proj = project_hold(proj0p);

    lgrp_affinity_init(&t->t_lgrp_affinity);

    mutex_enter(&pidlock);
    nthread++;
    t->t_did = next_t_id++;
    t->t_prev = curthread->t_prev;
    t->t_next = curthread;

    /*
     * Add the thread to the list of all threads, and initialize
     * its t_cpu pointer.  We need to block preemption since
     * cpu_offline walks the thread list looking for threads
     * with t_cpu pointing to the CPU being offlined.  We want
     * to make sure that the list is consistent and that if t_cpu
     * is set, the thread is on the list.
     */
    kpreempt_disable();
    curthread->t_prev->t_next = t;
    curthread->t_prev = t;

    /*
     * Threads should never have a NULL t_cpu pointer so assign it
     * here.  If the thread is being created with state TS_RUN a
     * better CPU may be chosen when it is placed on the run queue.
     *
     * We need to keep kernel preemption disabled when setting all
     * three fields to keep them in sync.  Also, always create in
     * the default partition since that's where kernel threads go
     * (if this isn't a kernel thread, t_cpupart will be changed
     * in lwp_create before setting the thread runnable).
     */
    t->t_cpupart = &cp_default;

    /*
     * For now, affiliate this thread with the root lgroup.
     * Since the kernel does not (presently) allocate its memory
     * in a locality aware fashion, the root is an appropriate home.
     * If this thread is later associated with an lwp, it will have
     * its lgroup re-assigned at that time.
     */
    lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

    /*
     * Inherit the current cpu.  If this cpu isn't part of the chosen
     * lgroup, a new cpu will be chosen by cpu_choose when the thread
     * is ready to run.
     */
    if (CPU->cpu_part == &cp_default)
        t->t_cpu = CPU;
    else
        t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
            t->t_pri, NULL);

    t->t_disp_queue = t->t_cpu->cpu_disp;
    kpreempt_enable();

    /*
     * Initialize thread state and the dispatcher lock pointer.
     * Need to hold onto pidlock to block allthreads walkers until
     * the state is set.
     */
    switch (state) {
    case TS_RUN:
        curthread->t_oldspl = splhigh();	/* get dispatcher spl */
        THREAD_SET_STATE(t, TS_STOPPED, &transition_lock);
        CL_SETRUN(t);
        thread_unlock(t);
        break;

    case TS_ONPROC:
        THREAD_ONPROC(t, t->t_cpu);
        break;

    case TS_FREE:
        /*
         * Free state will be used for intr threads.
         * The interrupt routine must set the thread dispatcher
         * lock pointer (t_lockp) if starting on a CPU
         * other than the current one.
         */
        THREAD_FREEINTR(t, CPU);
        break;

    case TS_STOPPED:
        THREAD_SET_STATE(t, TS_STOPPED, &stop_lock);
        break;

    default:			/* TS_SLEEP, TS_ZOMB or TS_TRANS */
        cmn_err(CE_PANIC, "thread_create: invalid state %d", state);
    }
    mutex_exit(&pidlock);
    return (t);
}
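
/*
 * Illustrative sketch, not part of the original file: typical use of
 * thread_create() to start a kernel daemon thread, modeled on the
 * thread_reaper creation in thread_init() above.  my_daemon() and
 * my_daemon_start() are hypothetical names; the block is guarded with
 * #if 0 so it is not compiled into the kernel.
 */
#if 0
static void
my_daemon(void)
{
    for (;;) {
        /* do periodic work, then sleep for roughly one second */
        delay(hz);
    }
}

static void
my_daemon_start(void)
{
    /*
     * A NULL stk makes thread_create() allocate the stack from segkp
     * and embed the kthread_t in it; it blocks for memory and never
     * fails, as the comment above thread_create() states.
     */
    (void) thread_create(NULL, 0, (void (*)())my_daemon, NULL, 0, &p0,
        TS_RUN, minclsyspri);
}
#endif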

/*
 * Move thread to project0 and take care of project reference counters.
 */
void
thread_rele(kthread_t *t)
{
    kproject_t *kpj;

    thread_lock(t);

    ASSERT(t == curthread || t->t_state == TS_FREE || t->t_procp == &p0);
    kpj = ttoproj(t);
    t->t_proj = proj0p;

    thread_unlock(t);

    if (kpj != proj0p) {
        project_rele(kpj);
        (void) project_hold(proj0p);
    }
}

void
thread_exit(void)
{
    kthread_t *t = curthread;

    if ((t->t_proc_flag & TP_ZTHREAD) != 0)
        cmn_err(CE_PANIC, "thread_exit: zthread_exit() not called");

    tsd_exit();		/* Clean up this thread's TSD */

    kcpc_passivate();	/* clean up performance counter state */

    /*
     * No kernel thread should have called poll() without arranging
     * to call pollcleanup() here.
     */
    ASSERT(t->t_pollstate == NULL);
    ASSERT(t->t_schedctl == NULL);
    if (t->t_door)
        door_slam();	/* in case thread did an upcall */

#ifndef NPROBE
    /* Kernel probe */
    if (t->t_tnf_tpdp)
        tnf_thread_exit();
#endif /* NPROBE */

    thread_rele(t);
    t->t_preempt++;

    /*
     * remove thread from the all threads list so that
     * death-row can use the same pointers.
     */
    mutex_enter(&pidlock);
    t->t_next->t_prev = t->t_prev;
    t->t_prev->t_next = t->t_next;
    ASSERT(allthreads != t);	/* t0 never exits */
    cv_broadcast(&t->t_joincv);	/* wake up anyone in thread_join */
    mutex_exit(&pidlock);

    if (t->t_ctx != NULL)
        exitctx(t);
    if (t->t_procp->p_pctx != NULL)
        exitpctx(t->t_procp);

    if (kmem_stackinfo != 0) {
        stkinfo_end(t);
    }

    t->t_state = TS_ZOMB;	/* set zombie thread */

    swtch_from_zombie();	/* give up the CPU */
    /* NOTREACHED */
}

/*
 * Check to see if the specified thread is active (defined as being on
 * the thread list).  This is certainly a slow way to do this; if there's
 * ever a reason to speed it up, we could maintain a hash table of active
 * threads indexed by their t_did.
 */
static kthread_t *
did_to_thread(kt_did_t tid)
{
    kthread_t *t;

    ASSERT(MUTEX_HELD(&pidlock));
    for (t = curthread->t_next; t != curthread; t = t->t_next) {
        if (t->t_did == tid)
            break;
    }
    if (t->t_did == tid)
        return (t);
    else
        return (NULL);
}

/*
 * Wait for specified thread to exit.  Returns immediately if the thread
 * could not be found, meaning that it has either already exited or never
 * existed.
 */
void
thread_join(kt_did_t tid)
{
    kthread_t *t;

    ASSERT(tid != curthread->t_did);
    ASSERT(tid != t0.t_did);

    mutex_enter(&pidlock);
    /*
     * Make sure we check that the thread is on the thread list
     * before blocking on it; otherwise we could end up blocking on
     * a cv that's already been freed.  In other words, don't cache
     * the thread pointer across calls to cv_wait.
     *
     * The choice of loop invariant means that whenever a thread
     * is taken off the allthreads list, a cv_broadcast must be
     * performed on that thread's t_joincv to wake up any waiters.
     * The broadcast doesn't have to happen right away, but it
     * shouldn't be postponed indefinitely (e.g., by doing it in
     * thread_free which may only be executed when the deathrow
     * queue is processed).
     */
    while (t = did_to_thread(tid))
        cv_wait(&t->t_joincv, &pidlock);
    mutex_exit(&pidlock);
}

void
thread_free_prevent(kthread_t *t)
{
    kmutex_t *lp;

    lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
    mutex_enter(lp);
}

void
thread_free_allow(kthread_t *t)
{
    kmutex_t *lp;

    lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
    mutex_exit(lp);
}

static void
thread_free_barrier(kthread_t *t)
{
    kmutex_t *lp;

    lp = &thread_free_lock[THREAD_FREE_HASH(t)].tf_lock;
    mutex_enter(lp);
    mutex_exit(lp);
}

void
thread_free(kthread_t *t)
{
    ASSERT(t != &t0 && t->t_state == TS_FREE);
    ASSERT(t->t_door == NULL);
    ASSERT(t->t_schedctl == NULL);
    ASSERT(t->t_pollstate == NULL);

    t->t_pri = 0;
    t->t_pc = 0;
    t->t_sp = 0;
    t->t_wchan0 = NULL;
    t->t_wchan = NULL;
    if (t->t_cred != NULL) {
        crfree(t->t_cred);
        t->t_cred = 0;
    }
    if (t->t_pdmsg) {
        kmem_free(t->t_pdmsg, strlen(t->t_pdmsg) + 1);
        t->t_pdmsg = NULL;
    }
    if (audit_active)
        audit_thread_free(t);
#ifndef NPROBE
    if (t->t_tnf_tpdp)
        tnf_thread_free(t);
#endif /* NPROBE */
    if (t->t_cldata) {
        CL_EXITCLASS(t->t_cid, (caddr_t *)t->t_cldata);
    }
    if (t->t_rprof != NULL) {
        kmem_free(t->t_rprof, sizeof (*t->t_rprof));
        t->t_rprof = NULL;
    }
    t->t_lockp = NULL;	/* nothing should try to lock this thread now */
    if (t->t_lwp)
        lwp_freeregs(t->t_lwp, 0);
    if (t->t_ctx)
        freectx(t, 0);
    t->t_stk = NULL;
    if (t->t_lwp)
        lwp_stk_fini(t->t_lwp);
    lock_clear(&t->t_lock);

    if (t->t_ts->ts_waiters > 0)
        panic("thread_free: turnstile still active");

    kmem_cache_free(turnstile_cache, t->t_ts);

    free_afd(&t->t_activefd);

    /*
     * Barrier for the tick accounting code.  The tick accounting code
     * holds this lock to keep the thread from going away while it's
     * looking at it.
     */
    thread_free_barrier(t);

    ASSERT(ttoproj(t) == proj0p);
    project_rele(ttoproj(t));

    lgrp_affinity_free(&t->t_lgrp_affinity);

    /*
     * Free thread struct and its stack.
     */
    if (t->t_flag & T_TALLOCSTK) {
        /* thread struct is embedded in stack */
        segkp_release(segkp, t->t_swap);
        mutex_enter(&pidlock);
        nthread--;
        mutex_exit(&pidlock);
    } else {
        if (t->t_swap) {
            segkp_release(segkp, t->t_swap);
            t->t_swap = NULL;
        }
        if (t->t_lwp) {
            kmem_cache_free(lwp_cache, t->t_lwp);
            t->t_lwp = NULL;
        }
        mutex_enter(&pidlock);
        nthread--;
        mutex_exit(&pidlock);
        kmem_cache_free(thread_cache, t);
    }
}
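
/*
 * Illustrative sketch, not part of the original file: how a consumer such
 * as the tick accounting code mentioned above can use the hashed
 * thread_free locks to keep a thread from being freed while it is being
 * examined.  tick_sample_thread() is a hypothetical function; the block is
 * guarded with #if 0 so it is not compiled into the kernel.
 */
#if 0
static void
tick_sample_thread(kthread_t *t)
{
    thread_free_prevent(t);	/* holds off thread_free_barrier(t) */
    /* thread_free(t) cannot complete while the hash lock is held */
    (void) t->t_pri;		/* safe to examine the thread here */
    thread_free_allow(t);	/* allow a pending thread_free() to proceed */
}
#endif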

/*
 * Removes threads associated with the given zone from a deathrow queue.
 * tp is a pointer to the head of the deathrow queue, and countp is a
 * pointer to the current deathrow count.  Returns a linked list of
 * threads removed from the list.
 */
static kthread_t *
thread_zone_cleanup(kthread_t **tp, int *countp, zoneid_t zoneid)
{
    kthread_t *tmp, *list = NULL;
    cred_t *cr;

    ASSERT(MUTEX_HELD(&reaplock));
    while (*tp != NULL) {
        if ((cr = (*tp)->t_cred) != NULL && crgetzoneid(cr) == zoneid) {
            tmp = *tp;
            *tp = tmp->t_forw;
            tmp->t_forw = list;
            list = tmp;
            (*countp)--;
        } else {
            tp = &(*tp)->t_forw;
        }
    }
    return (list);
}

static void
thread_reap_list(kthread_t *t)
{
    kthread_t *next;

    while (t != NULL) {
        next = t->t_forw;
        thread_free(t);
        t = next;
    }
}

/* ARGSUSED */
static void
thread_zone_destroy(zoneid_t zoneid, void *unused)
{
    kthread_t *t, *l;

    mutex_enter(&reaplock);
    /*
     * Pull threads and lwps associated with zone off deathrow lists.
     */
    t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
    l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
    mutex_exit(&reaplock);

    /*
     * Guard against race condition in mutex_owner_running:
     *	thread=owner(mutex)
     *	<interrupt>
     *	thread exits mutex
     *	thread exits
     *	thread reaped
     *	thread struct freed
     *	cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
     * A cross call to all cpus will cause the interrupt handler
     * to reset the PC if it is in mutex_owner_running, refreshing
     * stale thread pointers.
     */
    mutex_sync();	/* sync with mutex code */

    /*
     * Reap threads
     */
    thread_reap_list(t);

    /*
     * Reap lwps
     */
    thread_reap_list(l);
}

/*
 * cleanup zombie threads that are on deathrow.
 */
void
thread_reaper()
{
    kthread_t *t, *l;
    callb_cpr_t cprinfo;

    /*
     * Register callback to clean up threads when zone is destroyed.
     */
    zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);

    CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
    for (;;) {
        mutex_enter(&reaplock);
        while (thread_deathrow == NULL && lwp_deathrow == NULL) {
            CALLB_CPR_SAFE_BEGIN(&cprinfo);
            cv_wait(&reaper_cv, &reaplock);
            CALLB_CPR_SAFE_END(&cprinfo, &reaplock);
        }
        /*
         * mutex_sync() needs to be called when reaping, but
         * not too often.  We limit reaping rate to once
         * per second.  Reaplimit is max rate at which threads can
         * be freed.  Does not impact thread destruction/creation.
         */
        t = thread_deathrow;
        l = lwp_deathrow;
        thread_deathrow = NULL;
        lwp_deathrow = NULL;
        thread_reapcnt = 0;
        lwp_reapcnt = 0;
        mutex_exit(&reaplock);

        /*
         * Guard against race condition in mutex_owner_running:
         *	thread=owner(mutex)
         *	<interrupt>
         *	thread exits mutex
         *	thread exits
         *	thread reaped
         *	thread struct freed
         *	cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
         * A cross call to all cpus will cause the interrupt handler
         * to reset the PC if it is in mutex_owner_running, refreshing
         * stale thread pointers.
         */
        mutex_sync();	/* sync with mutex code */
        /*
         * Reap threads
         */
        thread_reap_list(t);

        /*
         * Reap lwps
         */
        thread_reap_list(l);
        delay(hz);
    }
}

/*
 * This is called by lwpcreate, etc.() to put a lwp_deathrow thread onto
 * thread_deathrow.  The thread's state has already been changed to TS_FREE
 * to indicate that it is reapable.  The caller already holds the reaplock,
 * and the thread has already been freed.
 */
void
reapq_move_lq_to_tq(kthread_t *t)
{
    ASSERT(t->t_state == TS_FREE);
    ASSERT(MUTEX_HELD(&reaplock));
    t->t_forw = thread_deathrow;
    thread_deathrow = t;
    thread_reapcnt++;
    if (lwp_reapcnt + thread_reapcnt > reaplimit)
        cv_signal(&reaper_cv);	/* wake the reaper */
}

/*
 * This is called by resume() to put a zombie thread onto deathrow.
 * The thread's state is changed to TS_FREE to indicate that it is reapable.
 * This is called from the idle thread so it must not block - just spin.
 */
void
reapq_add(kthread_t *t)
{
    mutex_enter(&reaplock);

    /*
     * lwp_deathrow contains only threads with lwp linkage
     * that are of the default stacksize.  Anything else goes
     * on thread_deathrow.
     */
    if (ttolwp(t) && (t->t_flag & T_DFLTSTK)) {
        t->t_forw = lwp_deathrow;
        lwp_deathrow = t;
        lwp_reapcnt++;
    } else {
        t->t_forw = thread_deathrow;
        thread_deathrow = t;
        thread_reapcnt++;
    }
    if (lwp_reapcnt + thread_reapcnt > reaplimit)
        cv_signal(&reaper_cv);	/* wake the reaper */
    t->t_state = TS_FREE;
    lock_clear(&t->t_lock);

    /*
     * Before we return, we need to grab and drop the thread lock for
     * the dead thread.  At this point, the current thread is the idle
     * thread, and the dead thread's CPU lock points to the current
     * CPU -- and we must grab and drop the lock to synchronize with
     * a racing thread walking a blocking chain that the zombie thread
     * was recently in.  By this point, that blocking chain is (by
     * definition) stale:  the dead thread is not holding any locks, and
     * is therefore not in any blocking chains -- but if we do not regrab
     * our lock before freeing the dead thread's data structures, the
     * thread walking the (stale) blocking chain will die on memory
     * corruption when it attempts to drop the dead thread's lock.  We
     * only need do this once because there is no way for the dead thread
     * to ever again be on a blocking chain:  once we have grabbed and
     * dropped the thread lock, we are guaranteed that anyone that could
     * have seen this thread in a blocking chain can no longer see it.
     */
    thread_lock(t);
    thread_unlock(t);

    mutex_exit(&reaplock);
}

/*
 * Install thread context ops for the current thread.
 */
void
installctx(
    kthread_t *t,
    void	*arg,
    void	(*save)(void *),
    void	(*restore)(void *),
    void	(*fork)(void *, void *),
    void	(*lwp_create)(void *, void *),
    void	(*exit)(void *),
    void	(*free)(void *, int))
{
    struct ctxop *ctx;

    ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
    ctx->save_op = save;
    ctx->restore_op = restore;
    ctx->fork_op = fork;
    ctx->lwp_create_op = lwp_create;
    ctx->exit_op = exit;
    ctx->free_op = free;
    ctx->arg = arg;
    ctx->next = t->t_ctx;
    t->t_ctx = ctx;
}

/*
 * Remove the thread context ops from a thread.
 */
int
removectx(
    kthread_t *t,
    void	*arg,
    void	(*save)(void *),
    void	(*restore)(void *),
    void	(*fork)(void *, void *),
    void	(*lwp_create)(void *, void *),
    void	(*exit)(void *),
    void	(*free)(void *, int))
{
    struct ctxop *ctx, *prev_ctx;

    /*
     * The incoming kthread_t (which is the thread for which the
     * context ops will be removed) should be one of the following:
     *
     * a) the current thread,
     *
     * b) a thread of a process that's being forked (SIDL),
     *
     * c) a thread that belongs to the same process as the current
     *    thread and for which the current thread is the agent thread,
     *
     * d) a thread that is TS_STOPPED which is indicative of it
     *    being (if curthread is not an agent) a thread being created
     *    as part of an lwp creation.
     */
    ASSERT(t == curthread || ttoproc(t)->p_stat == SIDL ||
        ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

    /*
     * Serialize modifications to t->t_ctx to prevent the agent thread
     * and the target thread from racing with each other during lwp exit.
     */
    mutex_enter(&t->t_ctx_lock);
    prev_ctx = NULL;
    for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next) {
        if (ctx->save_op == save && ctx->restore_op == restore &&
            ctx->fork_op == fork && ctx->lwp_create_op == lwp_create &&
            ctx->exit_op == exit && ctx->free_op == free &&
            ctx->arg == arg) {
            if (prev_ctx)
                prev_ctx->next = ctx->next;
            else
                t->t_ctx = ctx->next;
            mutex_exit(&t->t_ctx_lock);
            if (ctx->free_op != NULL)
                (ctx->free_op)(ctx->arg, 0);
            kmem_free(ctx, sizeof (struct ctxop));
            return (1);
        }
        prev_ctx = ctx;
    }
    mutex_exit(&t->t_ctx_lock);

    return (0);
}

void
savectx(kthread_t *t)
{
    struct ctxop *ctx;

    ASSERT(t == curthread);
    for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
        if (ctx->save_op != NULL)
            (ctx->save_op)(ctx->arg);
}

void
restorectx(kthread_t *t)
{
    struct ctxop *ctx;

    ASSERT(t == curthread);
    for (ctx = t->t_ctx; ctx != 0; ctx = ctx->next)
        if (ctx->restore_op != NULL)
            (ctx->restore_op)(ctx->arg);
}

void
forkctx(kthread_t *t, kthread_t *ct)
{
    struct ctxop *ctx;

    for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
        if (ctx->fork_op != NULL)
            (ctx->fork_op)(t, ct);
}
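
/*
 * Illustrative sketch, not part of the original file: registering
 * per-thread context ops on curthread with installctx() and removing them
 * again with removectx().  The my_ctx_* callbacks and the cookie are
 * hypothetical; the block is guarded with #if 0 so it is not compiled
 * into the kernel.
 */
#if 0
static void
my_ctx_save(void *arg)
{
    /* called with 'arg' each time the owning thread is switched off CPU */
}

static void
my_ctx_restore(void *arg)
{
    /* called with 'arg' each time the owning thread is switched onto CPU */
}

static void
my_ctx_free(void *arg, int isexec)
{
    kmem_free(arg, sizeof (uint64_t));
}

static void
my_ctx_example(void)
{
    uint64_t *cookie = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);

    installctx(curthread, cookie, my_ctx_save, my_ctx_restore,
        NULL, NULL, NULL, my_ctx_free);
    /* ... */
    /* removectx() matches on all ops plus arg, and calls free_op */
    (void) removectx(curthread, cookie, my_ctx_save, my_ctx_restore,
        NULL, NULL, NULL, my_ctx_free);
}
#endif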

/*
 * Note that this operator is only invoked via the _lwp_create
 * system call.  The system may have other reasons to create lwps
 * e.g. the agent lwp or the doors unreferenced lwp.
 */
void
lwp_createctx(kthread_t *t, kthread_t *ct)
{
    struct ctxop *ctx;

    for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
        if (ctx->lwp_create_op != NULL)
            (ctx->lwp_create_op)(t, ct);
}

/*
 * exitctx is called from thread_exit() and lwp_exit() to perform any actions
 * needed when the thread/LWP leaves the processor for the last time. This
 * routine is not intended to deal with freeing memory; freectx() is used for
 * that purpose during thread_free().  This routine is provided to allow for
 * clean-up that can't wait until thread_free().
 */
void
exitctx(kthread_t *t)
{
    struct ctxop *ctx;

    for (ctx = t->t_ctx; ctx != NULL; ctx = ctx->next)
        if (ctx->exit_op != NULL)
            (ctx->exit_op)(t);
}

/*
 * freectx is called from thread_free() and exec() to get
 * rid of old thread context ops.
 */
void
freectx(kthread_t *t, int isexec)
{
    struct ctxop *ctx;

    while ((ctx = t->t_ctx) != NULL) {
        t->t_ctx = ctx->next;
        if (ctx->free_op != NULL)
            (ctx->free_op)(ctx->arg, isexec);
        kmem_free(ctx, sizeof (struct ctxop));
    }
}

/*
 * freectx_ctx is called from lwp_create() when lwp is reused from
 * lwp_deathrow and its thread structure is added to thread_deathrow.
 * The thread structure to which this ctx was attached may be already
 * freed by the thread reaper so free_op implementations shouldn't rely
 * on thread structure to which this ctx was attached still being around.
 */
void
freectx_ctx(struct ctxop *ctx)
{
    struct ctxop *nctx;

    ASSERT(ctx != NULL);

    do {
        nctx = ctx->next;
        if (ctx->free_op != NULL)
            (ctx->free_op)(ctx->arg, 0);
        kmem_free(ctx, sizeof (struct ctxop));
    } while ((ctx = nctx) != NULL);
}

/*
 * Set the thread running; arrange for it to be swapped in if necessary.
 */
void
setrun_locked(kthread_t *t)
{
    ASSERT(THREAD_LOCK_HELD(t));
    if (t->t_state == TS_SLEEP) {
        /*
         * Take off sleep queue.
         */
        SOBJ_UNSLEEP(t->t_sobj_ops, t);
    } else if (t->t_state & (TS_RUN | TS_ONPROC)) {
        /*
         * Already on dispatcher queue.
         */
        return;
    } else if (t->t_state == TS_WAIT) {
        waitq_setrun(t);
    } else if (t->t_state == TS_STOPPED) {
        /*
         * All of the sending of SIGCONT (TC_XSTART) and /proc
         * (TC_PSTART) and lwp_continue() (TC_CSTART) must have
         * requested that the thread be run.
         * Just calling setrun() is not sufficient to set a stopped
         * thread running.  TP_TXSTART is always set if the thread
         * is not stopped by a jobcontrol stop signal.
         * TP_TPSTART is always set if /proc is not controlling it.
         * TP_TCSTART is always set if lwp_suspend() didn't stop it.
         * The thread won't be stopped unless one of these
         * three mechanisms did it.
         *
         * These flags must be set before calling setrun_locked(t).
         * They can't be passed as arguments because the streams
         * code calls setrun() indirectly and the mechanism for
         * doing so admits only one argument.  Note that the
         * thread must be locked in order to change t_schedflags.
         */
        if ((t->t_schedflag & TS_ALLSTART) != TS_ALLSTART)
            return;
        /*
         * Process is no longer stopped (a thread is running).
         */
        t->t_whystop = 0;
        t->t_whatstop = 0;
        /*
         * Strictly speaking, we do not have to clear these
         * flags here; they are cleared on entry to stop().
         * However, they are confusing when doing kernel
         * debugging or when they are revealed by ps(1).
         */
        t->t_schedflag &= ~TS_ALLSTART;
        THREAD_TRANSITION(t);	/* drop stopped-thread lock */
        ASSERT(t->t_lockp == &transition_lock);
        ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
        /*
         * Let the class put the process on the dispatcher queue.
         */
        CL_SETRUN(t);
    }
}

void
setrun(kthread_t *t)
{
    thread_lock(t);
    setrun_locked(t);
    thread_unlock(t);
}

/*
 * Unpin an interrupted thread.
 * When an interrupt occurs, the interrupt is handled on the stack
 * of an interrupt thread, taken from a pool linked to the CPU structure.
 *
 * When swtch() is switching away from an interrupt thread because it
 * blocked or was preempted, this routine is called to complete the
 * saving of the interrupted thread state, and returns the interrupted
 * thread pointer so it may be resumed.
 *
 * Called by swtch() only at high spl.
 */
kthread_t *
thread_unpin()
{
    kthread_t	*t = curthread;	/* current thread */
    kthread_t	*itp;		/* interrupted thread */
    int		i;		/* interrupt level */
    extern int	intr_passivate();

    ASSERT(t->t_intr != NULL);

    itp = t->t_intr;		/* interrupted thread */
    t->t_intr = NULL;		/* clear interrupt ptr */

    /*
     * Get state from interrupt thread for the one
     * it interrupted.
     */

    i = intr_passivate(t, itp);

    TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
        "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
        i, t, t, itp, itp);

    /*
     * Dissociate the current thread from the interrupted thread's LWP.
     */
    t->t_lwp = NULL;

    /*
     * Interrupt handlers above the level that spinlocks block must
     * not block.
     */
#if DEBUG
    if (i < 0 || i > LOCK_LEVEL)
        cmn_err(CE_PANIC, "thread_unpin: ipl out of range %x", i);
#endif

    /*
     * Compute the CPU's base interrupt level based on the active
     * interrupts.
     */
    ASSERT(CPU->cpu_intr_actv & (1 << i));
    set_base_spl();

    return (itp);
}

/*
 * Create and initialize an interrupt thread.
 * Returns non-zero on error.
 * Called at spl7() or better.
 */
void
thread_create_intr(struct cpu *cp)
{
    kthread_t *tp;

    tp = thread_create(NULL, 0,
        (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

    /*
     * Set the thread in the TS_FREE state.  The state will change
     * to TS_ONPROC only while the interrupt is active.  Think of these
     * as being on a private free list for the CPU.  Being TS_FREE keeps
     * inactive interrupt threads out of debugger thread lists.
     *
     * We cannot call thread_create with TS_FREE because of the current
     * checks there for ONPROC.  Fix this when thread_create takes flags.
     */
    THREAD_FREEINTR(tp, cp);

    /*
     * Nobody should ever reference the credentials of an interrupt
     * thread so make it NULL to catch any such references.
     */
    tp->t_cred = NULL;
    tp->t_flag |= T_INTR_THREAD;
    tp->t_cpu = cp;
    tp->t_bound_cpu = cp;
    tp->t_disp_queue = cp->cpu_disp;
    tp->t_affinitycnt = 1;
    tp->t_preempt = 1;

    /*
     * Don't make a user-requested binding on this thread so that
     * the processor can be offlined.
     */
    tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
    tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
    tp->t_stk -= STACK_ALIGN;
    *(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

    /*
     * Link onto CPU's interrupt pool.
     */
    tp->t_link = cp->cpu_intr_thread;
    cp->cpu_intr_thread = tp;
}

/*
 * TSD -- THREAD SPECIFIC DATA
 */
static kmutex_t		tsd_mutex;	/* linked list spin lock */
static uint_t		tsd_nkeys;	/* size of destructor array */
/* per-key destructor funcs */
static void		(**tsd_destructor)(void *);
/* list of tsd_thread's */
static struct tsd_thread	*tsd_list;

/*
 * Default destructor
 * Needed because NULL destructor means that the key is unused
 */
/* ARGSUSED */
void
tsd_defaultdestructor(void *value)
{}

/*
 * Create a key (index into per thread array)
 * Locks out tsd_create, tsd_destroy, and tsd_exit
 * May allocate memory with lock held
 */
void
tsd_create(uint_t *keyp, void (*destructor)(void *))
{
    int	i;
    uint_t	nkeys;

    /*
     * if key is allocated, do nothing
     */
    mutex_enter(&tsd_mutex);
    if (*keyp) {
        mutex_exit(&tsd_mutex);
        return;
    }
    /*
     * find an unused key
     */
    if (destructor == NULL)
        destructor = tsd_defaultdestructor;

    for (i = 0; i < tsd_nkeys; ++i)
        if (tsd_destructor[i] == NULL)
            break;

    /*
     * if no unused keys, increase the size of the destructor array
     */
    if (i == tsd_nkeys) {
        if ((nkeys = (tsd_nkeys << 1)) == 0)
            nkeys = 1;
        tsd_destructor =
            (void (**)(void *))tsd_realloc((void *)tsd_destructor,
            (size_t)(tsd_nkeys * sizeof (void (*)(void *))),
            (size_t)(nkeys * sizeof (void (*)(void *))));
        tsd_nkeys = nkeys;
    }

    /*
     * allocate the next available unused key
     */
    tsd_destructor[i] = destructor;
    *keyp = i + 1;
    mutex_exit(&tsd_mutex);
}

/*
 * Destroy a key -- this is for unloadable modules
 *
 * Assumes that the caller is preventing tsd_set and tsd_get
 * Locks out tsd_create, tsd_destroy, and tsd_exit
 * May free memory with lock held
 */
void
tsd_destroy(uint_t *keyp)
{
    uint_t key;
    struct tsd_thread *tsd;

    /*
     * protect the key namespace and our destructor lists
     */
    mutex_enter(&tsd_mutex);
    key = *keyp;
    *keyp = 0;

    ASSERT(key <= tsd_nkeys);

    /*
     * if the key is valid
     */
    if (key != 0) {
        uint_t k = key - 1;
        /*
         * for every thread with TSD, call key's destructor
         */
        for (tsd = tsd_list; tsd; tsd = tsd->ts_next) {
            /*
             * no TSD for key in this thread
             */
            if (key > tsd->ts_nkeys)
                continue;
            /*
             * call destructor for key
             */
            if (tsd->ts_value[k] && tsd_destructor[k])
                (*tsd_destructor[k])(tsd->ts_value[k]);
            /*
             * reset value for key
             */
            tsd->ts_value[k] = NULL;
        }
        /*
         * actually free the key (NULL destructor == unused)
         */
        tsd_destructor[k] = NULL;
    }

    mutex_exit(&tsd_mutex);
}

/*
 * Quickly return the per thread value that was stored with the specified key
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 */
void *
tsd_get(uint_t key)
{
    return (tsd_agent_get(curthread, key));
}

/*
 * Set a per thread value indexed with the specified key
 */
int
tsd_set(uint_t key, void *value)
{
    return (tsd_agent_set(curthread, key, value));
}

/*
 * Like tsd_get(), except that the agent lwp can get the tsd of
 * another thread in the same process (the agent thread only runs when the
 * process is completely stopped by /proc), or syslwp is creating a new lwp.
 */
void *
tsd_agent_get(kthread_t *t, uint_t key)
{
    struct tsd_thread *tsd = t->t_tsd;

    ASSERT(t == curthread ||
        ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

    if (key && tsd != NULL && key <= tsd->ts_nkeys)
        return (tsd->ts_value[key - 1]);
    return (NULL);
}

/*
 * Like tsd_set(), except that the agent lwp can set the tsd of
 * another thread in the same process, or syslwp can set the tsd
 * of a thread it's in the middle of creating.
 *
 * Assumes the caller is protecting key from tsd_create and tsd_destroy
 * May lock out tsd_destroy (and tsd_create), may allocate memory with
 * lock held
 */
int
tsd_agent_set(kthread_t *t, uint_t key, void *value)
{
    struct tsd_thread *tsd = t->t_tsd;

    ASSERT(t == curthread ||
        ttoproc(t)->p_agenttp == curthread || t->t_state == TS_STOPPED);

    if (key == 0)
        return (EINVAL);
    if (tsd == NULL)
        tsd = t->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
    if (key <= tsd->ts_nkeys) {
        tsd->ts_value[key - 1] = value;
        return (0);
    }

    ASSERT(key <= tsd_nkeys);

    /*
     * lock out tsd_destroy()
     */
    mutex_enter(&tsd_mutex);
    if (tsd->ts_nkeys == 0) {
        /*
         * Link onto list of threads with TSD
         */
        if ((tsd->ts_next = tsd_list) != NULL)
            tsd_list->ts_prev = tsd;
        tsd_list = tsd;
    }

    /*
     * Allocate thread local storage and set the value for key
     */
    tsd->ts_value = tsd_realloc(tsd->ts_value,
        tsd->ts_nkeys * sizeof (void *),
        key * sizeof (void *));
    tsd->ts_nkeys = key;
    tsd->ts_value[key - 1] = value;
    mutex_exit(&tsd_mutex);

    return (0);
}


/*
 * Return the per thread value that was stored with the specified key
 * If necessary, create the key and the value
 * Assumes the caller is protecting *keyp from tsd_destroy
 */
void *
tsd_getcreate(uint_t *keyp, void (*destroy)(void *), void *(*allocate)(void))
{
    void *value;
    uint_t key = *keyp;
    struct tsd_thread *tsd = curthread->t_tsd;

    if (tsd == NULL)
        tsd = curthread->t_tsd = kmem_zalloc(sizeof (*tsd), KM_SLEEP);
    if (key && key <= tsd->ts_nkeys && (value = tsd->ts_value[key - 1]))
        return (value);
    if (key == 0)
        tsd_create(keyp, destroy);
    (void) tsd_set(*keyp, value = (*allocate)());

    return (value);
}
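
/*
 * Illustrative sketch, not part of the original file: typical use of the
 * TSD interfaces above by kernel code.  my_tsd_key, my_tsd_destructor() and
 * my_tsd_lookup() are hypothetical; the block is guarded with #if 0 so it
 * is not compiled into the kernel.
 */
#if 0
static uint_t my_tsd_key;		/* 0 until tsd_create() assigns a key */

static void
my_tsd_destructor(void *value)
{
    /* run on this thread's value from tsd_exit() or tsd_destroy() */
    kmem_free(value, sizeof (uint64_t));
}

static uint64_t *
my_tsd_lookup(void)
{
    uint64_t *val = tsd_get(my_tsd_key);

    if (val == NULL) {
        tsd_create(&my_tsd_key, my_tsd_destructor);	/* idempotent */
        val = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
        (void) tsd_set(my_tsd_key, val);
    }
    return (val);
}
#endif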

/*
 * Called from thread_exit() to run the destructor function for each tsd
 * Locks out tsd_create and tsd_destroy
 * Assumes that the destructor *DOES NOT* use tsd
 */
void
tsd_exit(void)
{
    int i;
    struct tsd_thread *tsd = curthread->t_tsd;

    if (tsd == NULL)
        return;

    if (tsd->ts_nkeys == 0) {
        kmem_free(tsd, sizeof (*tsd));
        curthread->t_tsd = NULL;
        return;
    }

    /*
     * lock out tsd_create and tsd_destroy, call
     * the destructor, and mark the value as destroyed.
     */
    mutex_enter(&tsd_mutex);

    for (i = 0; i < tsd->ts_nkeys; i++) {
        if (tsd->ts_value[i] && tsd_destructor[i])
            (*tsd_destructor[i])(tsd->ts_value[i]);
        tsd->ts_value[i] = NULL;
    }

    /*
     * remove from linked list of threads with TSD
     */
    if (tsd->ts_next)
        tsd->ts_next->ts_prev = tsd->ts_prev;
    if (tsd->ts_prev)
        tsd->ts_prev->ts_next = tsd->ts_next;
    if (tsd_list == tsd)
        tsd_list = tsd->ts_next;

    mutex_exit(&tsd_mutex);

    /*
     * free up the TSD
     */
    kmem_free(tsd->ts_value, tsd->ts_nkeys * sizeof (void *));
    kmem_free(tsd, sizeof (struct tsd_thread));
    curthread->t_tsd = NULL;
}

/*
 * realloc
 */
static void *
tsd_realloc(void *old, size_t osize, size_t nsize)
{
    void *new;

    new = kmem_zalloc(nsize, KM_SLEEP);
    if (old) {
        bcopy(old, new, osize);
        kmem_free(old, osize);
    }
    return (new);
}

/*
 * Check to see if an interrupt thread might be active at a given ipl.
 * If so return true.
 * We must be conservative--it is ok to give a false yes, but a false no
 * will cause disaster.  (But if the situation changes after we check it is
 * ok--the caller is trying to ensure that an interrupt routine has been
 * exited).
 * This is used when trying to remove an interrupt handler from an autovector
 * list in avintr.c.
 */
int
intr_active(struct cpu *cp, int level)
{
    if (level <= LOCK_LEVEL)
        return (cp->cpu_thread != cp->cpu_dispthread);
    else
        return (CPU_ON_INTR(cp));
}

/*
 * Return non-zero if an interrupt is being serviced.
 */
int
servicing_interrupt()
{
    int onintr = 0;

    /* Are we an interrupt thread */
    if (curthread->t_flag & T_INTR_THREAD)
        return (1);
    /* Are we servicing a high level interrupt? */
    if (CPU_ON_INTR(CPU)) {
        kpreempt_disable();
        onintr = CPU_ON_INTR(CPU);
        kpreempt_enable();
    }
    return (onintr);
}


/*
 * Change the dispatch priority of a thread in the system.
 * Used when raising or lowering a thread's priority.
 * (E.g., priority inheritance)
 *
 * Since threads are queued according to their priority, we
 * must check the thread's state to determine whether it
 * is on a queue somewhere.  If it is, we've got to:
 *
 *	o Dequeue the thread.
 *	o Change its effective priority.
 *	o Enqueue the thread.
 *
 * Assumptions: The thread whose priority we wish to change
 * must be locked before we call thread_change_(e)pri().
 * The thread_change(e)pri() function doesn't drop the thread
 * lock--that must be done by its caller.
 */
void
thread_change_epri(kthread_t *t, pri_t disp_pri)
{
    uint_t	state;

    ASSERT(THREAD_LOCK_HELD(t));

    /*
     * If the inherited priority hasn't actually changed,
     * just return.
     */
    if (t->t_epri == disp_pri)
        return;

    state = t->t_state;

    /*
     * If it's not on a queue, change the priority with impunity.
     */
    if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
        t->t_epri = disp_pri;
        if (state == TS_ONPROC) {
            cpu_t *cp = t->t_disp_queue->disp_cpu;

            if (t == cp->cpu_dispthread)
                cp->cpu_dispatch_pri = DISP_PRIO(t);
        }
    } else if (state == TS_SLEEP) {
        /*
         * Take the thread out of its sleep queue.
         * Change the inherited priority.
         * Re-enqueue the thread.
         * Each synchronization object exports a function
         * to do this in an appropriate manner.
         */
        SOBJ_CHANGE_EPRI(t->t_sobj_ops, t, disp_pri);
    } else if (state == TS_WAIT) {
        /*
         * Re-enqueue a thread on the wait queue if its
         * effective priority needs to change.
         */
        if (disp_pri != t->t_epri)
            waitq_change_pri(t, disp_pri);
    } else {
        /*
         * The thread is on a run queue.
         * Note: setbackdq() may not put the thread
         * back on the same run queue where it originally
         * resided.
         */
        (void) dispdeq(t);
        t->t_epri = disp_pri;
        setbackdq(t);
    }
    schedctl_set_cidpri(t);
}

/*
 * Function: Change the t_pri field of a thread.
 * Side Effects: Adjust the thread ordering on a run queue
 *		 or sleep queue, if necessary.
 * Returns: 1 if the thread was on a run queue, else 0.
 */
int
thread_change_pri(kthread_t *t, pri_t disp_pri, int front)
{
    uint_t	state;
    int	on_rq = 0;

    ASSERT(THREAD_LOCK_HELD(t));

    state = t->t_state;
    THREAD_WILLCHANGE_PRI(t, disp_pri);

    /*
     * If it's not on a queue, change the priority with impunity.
     */
    if ((state & (TS_SLEEP | TS_RUN | TS_WAIT)) == 0) {
        t->t_pri = disp_pri;

        if (state == TS_ONPROC) {
            cpu_t *cp = t->t_disp_queue->disp_cpu;

            if (t == cp->cpu_dispthread)
                cp->cpu_dispatch_pri = DISP_PRIO(t);
        }
    } else if (state == TS_SLEEP) {
        /*
         * If the priority has changed, take the thread out of
         * its sleep queue and change the priority.
         * Re-enqueue the thread.
         * Each synchronization object exports a function
         * to do this in an appropriate manner.
         */
        if (disp_pri != t->t_pri)
            SOBJ_CHANGE_PRI(t->t_sobj_ops, t, disp_pri);
    } else if (state == TS_WAIT) {
        /*
         * Re-enqueue a thread on the wait queue if its
         * priority needs to change.
         */
        if (disp_pri != t->t_pri)
            waitq_change_pri(t, disp_pri);
    } else {
        /*
         * The thread is on a run queue.
         * Note: setbackdq() may not put the thread
         * back on the same run queue where it originally
         * resided.
         *
         * We still requeue the thread even if the priority
         * is unchanged to preserve round-robin (and other)
         * effects between threads of the same priority.
         */
        on_rq = dispdeq(t);
        ASSERT(on_rq);
        t->t_pri = disp_pri;
        if (front) {
            setfrontdq(t);
        } else {
            setbackdq(t);
        }
    }
    schedctl_set_cidpri(t);
    return (on_rq);
}

/*
 * Tunable kmem_stackinfo is set, fill the kernel thread stack with a
 * specific pattern.
 */
static void
stkinfo_begin(kthread_t *t)
{
    caddr_t	start;	/* stack start */
    caddr_t	end;	/* stack end */
    uint64_t *ptr;	/* pattern pointer */

    /*
     * Stack grows up or down, see thread_create(),
     * compute stack memory area start and end (start < end).
     */
    if (t->t_stk > t->t_stkbase) {
        /* stack grows down */
        start = t->t_stkbase;
        end = t->t_stk;
    } else {
        /* stack grows up */
        start = t->t_stk;
        end = t->t_stkbase;
    }

    /*
     * Stackinfo pattern size is 8 bytes.  Ensure proper 8 byte
     * alignment for start and end in stack area boundaries
     * (protection against corrupt t_stkbase/t_stk data).
     */
    if ((((uintptr_t)start) & 0x7) != 0) {
        start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
    }
    end = (caddr_t)(((uintptr_t)end) & (~0x7));

    if ((end <= start) || (end - start) > (1024 * 1024)) {
        /* negative or stack size > 1 meg, assume bogus */
        return;
    }

    /* fill stack area with a pattern (instead of zeros) */
    ptr = (uint64_t *)((void *)start);
    while (ptr < (uint64_t *)((void *)end)) {
        *ptr++ = KMEM_STKINFO_PATTERN;
    }
}


/*
 * Tunable kmem_stackinfo is set, create the stackinfo log if it doesn't
 * already exist, compute the percentage of kernel stack really used, and
 * record it in the log if it is among the highest percentages seen so far.
 */
static void
stkinfo_end(kthread_t *t)
{
    caddr_t	start;	/* stack start */
    caddr_t	end;	/* stack end */
    uint64_t *ptr;	/* pattern pointer */
    size_t stksz;	/* stack size */
    size_t smallest = 0;
    size_t percent = 0;
    uint_t index = 0;
    uint_t i;
    static size_t smallest_percent = (size_t)-1;
    static uint_t full = 0;

    /* create the stackinfo log, if it doesn't already exist */
    mutex_enter(&kmem_stkinfo_lock);
    if (kmem_stkinfo_log == NULL) {
        kmem_stkinfo_log = (kmem_stkinfo_t *)
            kmem_zalloc(KMEM_STKINFO_LOG_SIZE *
            (sizeof (kmem_stkinfo_t)), KM_NOSLEEP);
        if (kmem_stkinfo_log == NULL) {
            mutex_exit(&kmem_stkinfo_lock);
            return;
        }
    }
    mutex_exit(&kmem_stkinfo_lock);

    /*
     * Stack grows up or down, see thread_create(),
     * compute stack memory area start and end (start < end).
     */
    if (t->t_stk > t->t_stkbase) {
        /* stack grows down */
        start = t->t_stkbase;
        end = t->t_stk;
    } else {
        /* stack grows up */
        start = t->t_stk;
        end = t->t_stkbase;
    }

    /* stack size as found in kthread_t */
    stksz = end - start;

    /*
     * Stackinfo pattern size is 8 bytes.  Ensure proper 8 byte
     * alignment for start and end in stack area boundaries
     * (protection against corrupt t_stkbase/t_stk data).
     */
    if ((((uintptr_t)start) & 0x7) != 0) {
        start = (caddr_t)((((uintptr_t)start) & (~0x7)) + 8);
    }
    end = (caddr_t)(((uintptr_t)end) & (~0x7));

    if ((end <= start) || (end - start) > (1024 * 1024)) {
        /* negative or stack size > 1 meg, assume bogus */
        return;
    }

    /* search until no pattern in the stack */
    if (t->t_stk > t->t_stkbase) {
        /* stack grows down */
#if defined(__i386) || defined(__amd64)
        /*
         * 6 longs are pushed on stack, see thread_load().  Skip
         * them, so if kthread has never run, percent is zero.
         * 8 byte alignment is preserved for a 32 bit kernel,
         * 6 x 4 = 24, 24 is a multiple of 8.
         *
         */
        end -= (6 * sizeof (long));
#endif
        ptr = (uint64_t *)((void *)start);
        while (ptr < (uint64_t *)((void *)end)) {
            if (*ptr != KMEM_STKINFO_PATTERN) {
                percent = stkinfo_percent(end,
                    start, (caddr_t)ptr);
                break;
            }
            ptr++;
        }
    } else {
        /* stack grows up */
        ptr = (uint64_t *)((void *)end);
        ptr--;
        while (ptr >= (uint64_t *)((void *)start)) {
            if (*ptr != KMEM_STKINFO_PATTERN) {
                percent = stkinfo_percent(start,
                    end, (caddr_t)ptr);
                break;
            }
            ptr--;
        }
    }

    DTRACE_PROBE3(stack__usage, kthread_t *, t,
        size_t, stksz, size_t, percent);

    if (percent == 0) {
        return;
    }

    mutex_enter(&kmem_stkinfo_lock);
    if (full == KMEM_STKINFO_LOG_SIZE && percent < smallest_percent) {
        /*
         * The log is full and already contains the highest values
         */
        mutex_exit(&kmem_stkinfo_lock);
        return;
    }

    /* keep a log of the highest used stack */
    for (i = 0; i < KMEM_STKINFO_LOG_SIZE; i++) {
        if (kmem_stkinfo_log[i].percent == 0) {
            index = i;
            full++;
            break;
        }
        if (smallest == 0) {
            smallest = kmem_stkinfo_log[i].percent;
            index = i;
            continue;
        }
        if (kmem_stkinfo_log[i].percent < smallest) {
            smallest = kmem_stkinfo_log[i].percent;
            index = i;
        }
    }

    if (percent >= kmem_stkinfo_log[index].percent) {
        kmem_stkinfo_log[index].kthread = (caddr_t)t;
        kmem_stkinfo_log[index].t_startpc = (caddr_t)t->t_startpc;
        kmem_stkinfo_log[index].start = start;
        kmem_stkinfo_log[index].stksz = stksz;
        kmem_stkinfo_log[index].percent = percent;
        kmem_stkinfo_log[index].t_tid = t->t_tid;
        kmem_stkinfo_log[index].cmd[0] = '\0';
        if (t->t_tid != 0) {
            stksz = strlen((t->t_procp)->p_user.u_comm);
            if (stksz >= KMEM_STKINFO_STR_SIZE) {
                stksz = KMEM_STKINFO_STR_SIZE - 1;
                kmem_stkinfo_log[index].cmd[stksz] = '\0';
            } else {
                stksz += 1;
            }
            (void) memcpy(kmem_stkinfo_log[index].cmd,
                (t->t_procp)->p_user.u_comm, stksz);
        }
        if (percent < smallest_percent) {
            smallest_percent = percent;
        }
    }
    mutex_exit(&kmem_stkinfo_lock);
}

/*
 * Tunable kmem_stackinfo is set, compute stack utilization percentage.
 */
static size_t
stkinfo_percent(caddr_t t_stk, caddr_t t_stkbase, caddr_t sp)
{
    size_t percent;
    size_t s;

    if (t_stk > t_stkbase) {
        /* stack grows down */
        if (sp > t_stk) {
            return (0);
        }
        if (sp < t_stkbase) {
            return (100);
        }
        percent = t_stk - sp + 1;
        s = t_stk - t_stkbase + 1;
    } else {
        /* stack grows up */
        if (sp < t_stk) {
            return (0);
        }
        if (sp > t_stkbase) {
            return (100);
        }
        percent = sp - t_stk + 1;
        s = t_stkbase - t_stk + 1;
    }
    percent = ((100 * percent) / s) + 1;
    if (percent > 100) {
        percent = 100;
    }
    return (percent);
}
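
/*
 * Worked example, added for illustration and not part of the original file:
 * for a downward-growing stack with t_stk = base + 0x4000, t_stkbase = base
 * and the deepest overwritten pattern word at sp = base + 0x3000,
 * stkinfo_percent() computes percent = (t_stk - sp + 1) = 0x1001 used bytes
 * out of s = (t_stk - t_stkbase + 1) = 0x4001 total bytes, giving
 * (100 * 0x1001) / 0x4001 + 1 = 26 percent, capped at 100.
 */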