/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _SYS_THREAD_H
#define	_SYS_THREAD_H

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/klwp.h>
#include <sys/time.h>
#include <sys/signal.h>
#include <sys/kcpc.h>
#if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL)
#include <asm/thread.h>
#endif

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * The thread object, its states, and the methods by which it
 * is accessed.
 */

/*
 * Values that t_state may assume. Note that t_state cannot have more
 * than one of these flags set at a time.
 */
#define	TS_FREE		0x00	/* Thread at loose ends */
#define	TS_SLEEP	0x01	/* Awaiting an event */
#define	TS_RUN		0x02	/* Runnable, but not yet on a processor */
#define	TS_ONPROC	0x04	/* Thread is being run on a processor */
#define	TS_ZOMB		0x08	/* Thread has died but hasn't been reaped */
#define	TS_STOPPED	0x10	/* Stopped, initial state */
#define	TS_WAIT		0x20	/* Waiting to become runnable */

typedef struct ctxop {
	void	(*save_op)(void *);	/* function to invoke to save context */
	void	(*restore_op)(void *);	/* function to invoke to restore ctx */
	void	(*fork_op)(void *, void *);	/* invoke to fork context */
	void	(*lwp_create_op)(void *, void *);	/* lwp_create context */
	void	(*exit_op)(void *);	/* invoked during {thread,lwp}_exit() */
	void	(*free_op)(void *, int);	/* function which frees the context */
	void	*arg;		/* argument to above functions, ctx pointer */
	struct ctxop *next;	/* next context ops */
} ctxop_t;

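/*
 * Illustrative sketch (not part of this header): a subsystem that keeps
 * per-thread hardware or software state can supply a ctxop_t-style
 * save/restore pair.  The installctx() call shown here is assumed from
 * the rest of the kernel and is not declared in this file; operations a
 * caller does not need may be passed as NULL.
 *
 *	static void
 *	mystate_save(void *arg)
 *	{
 *		struct mystate *ms = arg;	stash state before switch-out
 *	}
 *
 *	static void
 *	mystate_restore(void *arg)
 *	{
 *		struct mystate *ms = arg;	reload state after switch-in
 *	}
 *
 *	installctx(curthread, ms, mystate_save, mystate_restore,
 *	    NULL, NULL, NULL, NULL);
 */
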
/*
 * The active file descriptor table.
 * Each member of a_fd[] not equalling -1 represents an active fd.
 * The structure is initialized on first use; all zeros means uninitialized.
 */
typedef struct {
	kmutex_t a_fdlock;	/* protects a_fd and a_nfd */
	int	*a_fd;		/* pointer to list of fds */
	int	a_nfd;		/* number of entries in *a_fd */
	int	a_stale;	/* one of the active fds is being closed */
	int	a_buf[2];	/* buffer to which a_fd initially refers */
} afd_t;

/*
 * An lwpchan provides uniqueness when sleeping on user-level
 * synchronization primitives.  The lc_wchan member is used
 * for sleeping on kernel synchronization primitives.
 */
typedef struct {
	caddr_t lc_wchan0;
	caddr_t lc_wchan;
} lwpchan_t;

typedef struct _kthread *kthread_id_t;

struct turnstile;
struct panic_trap_info;
struct upimutex;
struct kproject;
struct on_trap_data;
struct waitq;
struct _kcpc_ctx;
struct _kcpc_set;

/* Definition for kernel thread identifier type */
typedef uint64_t kt_did_t;

typedef struct _kthread {
	struct _kthread *t_link;	/* dispq, sleepq, and free queue link */

	caddr_t	t_stk;		/* base of stack (kernel sp value to use) */
	void	(*t_startpc)(void);	/* PC where thread started */
	struct cpu *t_bound_cpu;	/* cpu bound to, or NULL if not bound */
	short	t_affinitycnt;	/* nesting level of kernel affinity-setting */
	short	t_bind_cpu;	/* user-specified CPU binding (-1 if none) */
	ushort_t t_flag;	/* modified only by current thread */
	ushort_t t_proc_flag;	/* modified holding ttoproc(t)->p_lock */
	ushort_t t_schedflag;	/* modified holding thread_lock(t) */
	volatile char t_preempt;	/* don't preempt thread if set */
	volatile char t_preempt_lk;
	uint_t	t_state;	/* thread state (protected by thread_lock) */
	pri_t	t_pri;		/* assigned thread priority */
	pri_t	t_epri;		/* inherited thread priority */
	pri_t	t_cpri;		/* thread scheduling class priority */
	char	t_writer;	/* sleeping in lwp_rwlock_lock(RW_WRITE_LOCK) */
	uchar_t	t_bindflag;	/* CPU and pset binding type */
	label_t	t_pcb;		/* pcb, save area when switching */
	lwpchan_t t_lwpchan;	/* reason for blocking */
#define	t_wchan0	t_lwpchan.lc_wchan0
#define	t_wchan		t_lwpchan.lc_wchan
	struct _sobj_ops *t_sobj_ops;
	id_t	t_cid;		/* scheduling class id */
	struct thread_ops *t_clfuncs;	/* scheduling class ops vector */
	void	*t_cldata;	/* per scheduling class specific data */
	ctxop_t	*t_ctx;		/* thread context */
	uintptr_t t_lofault;	/* ret pc for failed page faults */
	label_t	*t_onfault;	/* on_fault() setjmp buf */
	struct on_trap_data *t_ontrap;	/* on_trap() protection data */
	caddr_t	t_swap;		/* the bottom of the stack, if from segkp */
	lock_t	t_lock;		/* used to resume() a thread */
	uint8_t	t_lockstat;	/* set while thread is in lockstat code */
	uint8_t	t_pil;		/* interrupt thread PIL */
	disp_lock_t t_pi_lock;	/* lock protecting t_prioinv list */
	char	t_nomigrate;	/* do not migrate if set */
	struct cpu *t_cpu;	/* CPU that thread last ran on */
	struct cpu *t_weakbound_cpu;	/* cpu weakly bound to */
	struct lgrp_ld *t_lpl;	/* load average for home lgroup */
	void	*t_lgrp_reserv[2];	/* reserved for future */
	struct _kthread *t_intr;	/* interrupted (pinned) thread */
	uint64_t t_intr_start;	/* timestamp when time slice began */
	kt_did_t t_did;		/* thread id for kernel debuggers */
	caddr_t	t_tnf_tpdp;	/* Trace facility data pointer */
	struct _kcpc_ctx *t_cpc_ctx;	/* performance counter context */
	struct _kcpc_set *t_cpc_set;	/* set this thread has bound */

	/*
	 * non swappable part of the lwp state.
	 */
	id_t	t_tid;		/* lwp's id */
	id_t	t_waitfor;	/* target lwp id in lwp_wait() */
	struct sigqueue	*t_sigqueue;	/* queue of siginfo structs */
	k_sigset_t t_sig;	/* signals pending to this process */
	k_sigset_t t_extsig;	/* signals sent from another contract */
	k_sigset_t t_hold;	/* hold signal bit mask */
	k_sigset_t t_sigwait;	/* sigtimedwait/sigfd accepting these */
	struct	_kthread *t_forw;	/* process's forward thread link */
	struct	_kthread *t_back;	/* process's backward thread link */
	struct	_kthread *t_thlink;	/* tid (lwpid) lookup hash link */
	klwp_t	*t_lwp;		/* thread's lwp pointer */
	struct	proc *t_procp;	/* proc pointer */
	struct	t_audit_data *t_audit_data;	/* per thread audit data */
	struct	_kthread *t_next;	/* doubly linked list of all threads */
	struct	_kthread *t_prev;
	ushort_t t_whystop;	/* reason for stopping */
	ushort_t t_whatstop;	/* more detailed reason */
	int	t_dslot;	/* index in proc's thread directory */
	struct pollstate *t_pollstate;	/* state used during poll(2) */
	struct pollcache *t_pollcache;	/* to pass a pcache ptr by /dev/poll */
	struct cred *t_cred;	/* pointer to current cred */
	time_t	t_start;	/* start time, seconds since epoch */
	clock_t	t_lbolt;	/* lbolt at last clock_tick() */
	hrtime_t t_stoptime;	/* timestamp at stop() */
	uint_t	t_pctcpu;	/* %cpu at last clock_tick(), binary */
				/* point at right of high-order bit */
	short	t_sysnum;	/* system call number */
	kcondvar_t t_delay_cv;
	kmutex_t t_delay_lock;

	/*
	 * Pointer to the dispatcher lock protecting t_state and state-related
	 * flags.  This pointer can change during waits on the lock, so
	 * it should be grabbed only by thread_lock().
	 */
	disp_lock_t	*t_lockp;	/* pointer to the dispatcher lock */
	ushort_t	t_oldspl;	/* spl level before dispatcher locked */
	volatile char	t_pre_sys;	/* pre-syscall work needed */
	lock_t		t_lock_flush;	/* for lock_mutex_flush() impl */
	struct _disp	*t_disp_queue;	/* run queue for chosen CPU */
	clock_t		t_disp_time;	/* last time this thread was running */

	/*
	 * Post-syscall / post-trap flags.
	 *	No lock is required to set these.
	 *	These must be cleared only by the thread itself.
	 *
	 *	t_astflag indicates that some post-trap processing is required,
	 *		possibly a signal or a preemption.  The thread will not
	 *		return to user with this set.
	 *	t_post_sys indicates that some unusual post-system call
	 *		handling is required, such as an error or tracing.
	 *	t_sig_check indicates that some condition in ISSIG() must be
	 *		checked, but doesn't prevent returning to user.
	 *	t_post_sys_ast is a way of checking whether any of these three
	 *		flags are set.
	 */
	union __tu {
		struct __ts {
			volatile char	_t_astflag;	/* AST requested */
			volatile char	_t_sig_check;	/* ISSIG required */
			volatile char	_t_post_sys;	/* post_syscall req */
			volatile char	_t_trapret;	/* call CL_TRAPRET */
		} _ts;
		volatile int	_t_post_sys_ast;	/* OR of these flags */
	} _tu;
#define	t_astflag	_tu._ts._t_astflag
#define	t_sig_check	_tu._ts._t_sig_check
#define	t_post_sys	_tu._ts._t_post_sys
#define	t_trapret	_tu._ts._t_trapret
#define	t_post_sys_ast	_tu._t_post_sys_ast

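	/*
	 * Illustrative sketch (not part of this header): because the
	 * byte-sized flags above overlay the int t_post_sys_ast, the
	 * syscall/trap return path can test all of them with one load,
	 * along the lines of:
	 *
	 *	if (curthread->t_post_sys_ast == 0)
	 *		fast path: nothing pending, return to user;
	 *	else
	 *		slow path: examine t_astflag, t_sig_check, etc.;
	 */
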
	/*
	 * Real time microstate profiling.
	 */
	/* possible 4-byte filler */
	hrtime_t t_waitrq;	/* timestamp for run queue wait time */
	int	t_mstate;	/* current microstate */
	struct rprof {
		int	rp_anystate;		/* set if any state non-zero */
		uint_t	rp_state[NMSTATES];	/* mstate profiling counts */
	} *t_rprof;

	/*
	 * There is a turnstile inserted into the list below for
	 * every priority inverted synchronization object that
	 * this thread holds.
	 */

	struct turnstile *t_prioinv;

	/*
	 * Pointer to the turnstile attached to the synchronization
	 * object where this thread is blocked.
	 */

	struct turnstile *t_ts;

	/*
	 * kernel thread specific data
	 *	Borrowed from userland implementation of POSIX tsd
	 */
	struct tsd_thread {
		struct tsd_thread *ts_next;	/* threads with TSD */
		struct tsd_thread *ts_prev;	/* threads with TSD */
		uint_t	ts_nkeys;	/* entries in value array */
		void	**ts_value;	/* array of value/key */
	} *t_tsd;

	clock_t	t_stime;	/* time stamp used by the swapper */
	struct door_data *t_door;	/* door invocation data */
	kmutex_t *t_plockp;	/* pointer to process's p_lock */

	struct sc_shared *t_schedctl;	/* scheduler activations shared data */
	uintptr_t t_sc_uaddr;	/* user-level address of shared data */

	struct cpupart *t_cpupart;	/* partition containing thread */
	int	t_bind_pset;	/* processor set binding */

	struct copyops *t_copyops;	/* copy in/out ops vector */

	caddr_t	t_stkbase;	/* base of the stack */
	struct page *t_red_pp;	/* if non-NULL, redzone is mapped */

	afd_t	t_activefd;	/* active file descriptor table */

	struct _kthread	*t_priforw;	/* sleepq per-priority sublist */
	struct _kthread	*t_priback;

	struct sleepq *t_sleepq;	/* sleep queue thread is waiting on */
	struct panic_trap_info *t_panic_trap;	/* saved data from fatal trap */
	int	*t_lgrp_affinity;	/* lgroup affinity */
	struct upimutex *t_upimutex;	/* list of upimutexes owned by thread */
	uint32_t t_nupinest;	/* number of nested held upi mutexes */
	struct kproject *t_proj;	/* project containing this thread */
	uint8_t	t_unpark;	/* modified holding t_delay_lock */
	uint8_t	t_release;	/* lwp_release() woke up the thread */
	uint8_t	t_hatdepth;	/* depth of recursive hat_memloads */
	uint8_t	t_xpvcntr;	/* see xen_block_migrate() */
	kcondvar_t t_joincv;	/* cv used to wait for thread exit */
	void	*t_taskq;	/* for threads belonging to taskq */
	hrtime_t t_anttime;	/* most recent time anticipatory load */
				/* was added to an lgroup's load */
				/* on this thread's behalf */
	char	*t_pdmsg;	/* privilege debugging message */

	uint_t	t_predcache;	/* DTrace predicate cache */
	hrtime_t t_dtrace_vtime;	/* DTrace virtual time */
	hrtime_t t_dtrace_start;	/* DTrace slice start time */

	uint8_t	t_dtrace_stop;	/* indicates a DTrace-desired stop */
	uint8_t	t_dtrace_sig;	/* signal sent via DTrace's raise() */

	union __tdu {
		struct __tds {
			uint8_t	_t_dtrace_on;	/* hit a fasttrap tracepoint */
			uint8_t	_t_dtrace_step;	/* about to return to kernel */
			uint8_t	_t_dtrace_ret;	/* handling a return probe */
			uint8_t	_t_dtrace_ast;	/* saved ast flag */
#ifdef __amd64
			uint8_t	_t_dtrace_reg;	/* modified register */
#endif
		} _tds;
		ulong_t	_t_dtrace_ft;	/* bitwise or of these flags */
	} _tdu;
#define	t_dtrace_ft	_tdu._t_dtrace_ft
#define	t_dtrace_on	_tdu._tds._t_dtrace_on
#define	t_dtrace_step	_tdu._tds._t_dtrace_step
#define	t_dtrace_ret	_tdu._tds._t_dtrace_ret
#define	t_dtrace_ast	_tdu._tds._t_dtrace_ast
#ifdef __amd64
#define	t_dtrace_reg	_tdu._tds._t_dtrace_reg
#endif

	uintptr_t	t_dtrace_pc;	/* DTrace saved pc from fasttrap */
	uintptr_t	t_dtrace_npc;	/* DTrace next pc from fasttrap */
	uintptr_t	t_dtrace_scrpc;	/* DTrace per-thread scratch location */
	uintptr_t	t_dtrace_astpc;	/* DTrace return sequence location */
#ifdef __amd64
	uint64_t	t_dtrace_regv;	/* DTrace saved reg from fasttrap */
#endif
	hrtime_t	t_hrtime;	/* high-res last time on cpu */
	kmutex_t	t_ctx_lock;	/* protects t_ctx in removectx() */
	struct waitq	*t_waitq;	/* wait queue */
	kmutex_t	t_wait_mutex;	/* used in CV wait functions */
} kthread_t;

/*
 * Thread flag (t_flag) definitions.
 *	These flags must be changed only for the current thread,
 *	and not during preemption code, since the code being
 *	preempted could be modifying the flags.
 *
 *	For the most part these flags do not need locking.
 *	The following flags will only be changed while the thread_lock is held,
 *	to give assurance that they are consistent with t_state:
 *		T_WAKEABLE
 */
#define	T_INTR_THREAD	0x0001	/* thread is an interrupt thread */
#define	T_WAKEABLE	0x0002	/* thread is blocked, signals enabled */
#define	T_TOMASK	0x0004	/* use lwp_sigoldmask on return from signal */
#define	T_TALLOCSTK	0x0008	/* thread structure allocated from stk */
#define	T_FORKALL	0x0010	/* thread was cloned by forkall() */
#define	T_WOULDBLOCK	0x0020	/* for lockfs */
#define	T_DONTBLOCK	0x0040	/* for lockfs */
#define	T_DONTPEND	0x0080	/* for lockfs */
#define	T_SYS_PROF	0x0100	/* profiling on for duration of system call */
#define	T_WAITCVSEM	0x0200	/* waiting for a lwp_cv or lwp_sema on sleepq */
#define	T_WATCHPT	0x0400	/* thread undergoing a watchpoint emulation */
#define	T_PANIC		0x0800	/* thread initiated a system panic */
#define	T_LWPREUSE	0x1000	/* stack and LWP can be reused */
#define	T_CAPTURING	0x2000	/* thread is in page capture logic */
#define	T_VFPARENT	0x4000	/* thread is vfork parent, must call vfwait */
#define	T_DONTDTRACE	0x8000	/* disable DTrace probes */

/*
 * Flags in t_proc_flag.
 *	These flags must be modified only when holding the p_lock
 *	for the associated process.
 */
#define	TP_DAEMON	0x0001	/* this is an LWP_DAEMON lwp */
#define	TP_HOLDLWP	0x0002	/* hold thread's lwp */
#define	TP_TWAIT	0x0004	/* wait to be freed by lwp_wait() */
#define	TP_LWPEXIT	0x0008	/* lwp has exited */
#define	TP_PRSTOP	0x0010	/* thread is being stopped via /proc */
#define	TP_CHKPT	0x0020	/* thread is being stopped via CPR checkpoint */
#define	TP_EXITLWP	0x0040	/* terminate this lwp */
#define	TP_PRVSTOP	0x0080	/* thread is virtually stopped via /proc */
#define	TP_MSACCT	0x0100	/* collect micro-state accounting information */
#define	TP_STOPPING	0x0200	/* thread is executing stop() */
#define	TP_WATCHPT	0x0400	/* process has watchpoints in effect */
#define	TP_PAUSE	0x0800	/* process is being stopped via pauselwps() */
#define	TP_CHANGEBIND	0x1000	/* thread has a new cpu/cpupart binding */
#define	TP_ZTHREAD	0x2000	/* this is a kernel thread for a zone */
#define	TP_WATCHSTOP	0x4000	/* thread is stopping via holdwatch() */

/*
 * Thread scheduler flag (t_schedflag) definitions.
 *	The thread must be locked via thread_lock() or equiv. to change these.
 */
#define	TS_LOAD		0x0001	/* thread is in memory */
#define	TS_DONT_SWAP	0x0002	/* thread/lwp should not be swapped */
#define	TS_SWAPENQ	0x0004	/* swap thread when it reaches a safe point */
#define	TS_ON_SWAPQ	0x0008	/* thread is on the swap queue */
#define	TS_SIGNALLED	0x0010	/* thread was awakened by cv_signal() */
#define	TS_PROJWAITQ	0x0020	/* thread is on its project's waitq */
#define	TS_ZONEWAITQ	0x0040	/* thread is on its zone's waitq */
#define	TS_CSTART	0x0100	/* setrun() by continuelwps() */
#define	TS_UNPAUSE	0x0200	/* setrun() by unpauselwps() */
#define	TS_XSTART	0x0400	/* setrun() by SIGCONT */
#define	TS_PSTART	0x0800	/* setrun() by /proc */
#define	TS_RESUME	0x1000	/* setrun() by CPR resume process */
#define	TS_CREATE	0x2000	/* setrun() by syslwp_create() */
#define	TS_RUNQMATCH	0x4000	/* exact run queue balancing by setbackdq() */
#define	TS_ALLSTART	\
	(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
#define	TS_ANYWAITQ	(TS_PROJWAITQ|TS_ZONEWAITQ)

/*
 * Thread binding types
 */
#define	TB_ALLHARD	0
#define	TB_CPU_SOFT	0x01	/* soft binding to CPU */
#define	TB_PSET_SOFT	0x02	/* soft binding to pset */

#define	TB_CPU_SOFT_SET(t)	((t)->t_bindflag |= TB_CPU_SOFT)
#define	TB_CPU_HARD_SET(t)	((t)->t_bindflag &= ~TB_CPU_SOFT)
#define	TB_PSET_SOFT_SET(t)	((t)->t_bindflag |= TB_PSET_SOFT)
#define	TB_PSET_HARD_SET(t)	((t)->t_bindflag &= ~TB_PSET_SOFT)
#define	TB_CPU_IS_SOFT(t)	((t)->t_bindflag & TB_CPU_SOFT)
#define	TB_CPU_IS_HARD(t)	(!TB_CPU_IS_SOFT(t))
#define	TB_PSET_IS_SOFT(t)	((t)->t_bindflag & TB_PSET_SOFT)

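/*
 * Illustrative sketch (not part of this header): code that records a
 * user-requested CPU binding can tag it as soft or hard with the macros
 * above and test the flag later, e.g.:
 *
 *	if (binding_is_soft)		binding_is_soft is hypothetical,
 *		TB_CPU_SOFT_SET(t);	used only for the example
 *	else
 *		TB_CPU_HARD_SET(t);
 *
 *	if (TB_CPU_IS_SOFT(t))
 *		...
 */
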
/*
 * No locking needed for AST field.
 */
#define	aston(t)	((t)->t_astflag = 1)
#define	astoff(t)	((t)->t_astflag = 0)

/* True if thread is stopped on an event of interest */
#define	ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
			!((t)->t_schedflag & TS_PSTART))

/* True if thread is asleep and wakeable */
#define	ISWAKEABLE(t) (((t)->t_state == TS_SLEEP && \
			((t)->t_flag & T_WAKEABLE)))

/* True if thread is on the wait queue */
#define	ISWAITING(t) ((t)->t_state == TS_WAIT)

/* similar to ISTOPPED except the event of interest is CPR */
#define	CPR_ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
			!((t)->t_schedflag & TS_RESUME))

/*
 * True if thread is virtually stopped (is or was asleep in
 * one of the lwp_*() system calls and marked to stop by /proc.)
 */
#define	VSTOPPED(t)	((t)->t_proc_flag & TP_PRVSTOP)

/* similar to VSTOPPED except the point of interest is CPR */
#define	CPR_VSTOPPED(t)				\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan0 != NULL &&		\
	((t)->t_flag & T_WAKEABLE) &&		\
	((t)->t_proc_flag & TP_CHKPT))

/* True if thread has been stopped by hold*() or was created stopped */
#define	SUSPENDED(t) ((t)->t_state == TS_STOPPED && \
	((t)->t_schedflag & (TS_CSTART|TS_UNPAUSE)) != (TS_CSTART|TS_UNPAUSE))

/* True if thread possesses an inherited priority */
#define	INHERITED(t)	((t)->t_epri != 0)

/* The dispatch priority of a thread */
#define	DISP_PRIO(t)	((t)->t_epri > (t)->t_pri ? (t)->t_epri : (t)->t_pri)

/* The assigned priority of a thread */
#define	ASSIGNED_PRIO(t)	((t)->t_pri)

/*
 * Macros to determine whether a thread can be swapped.
 * If t_lock is held, the thread is either on a processor or being swapped.
 */
#define	SWAP_OK(t)	(!LOCK_HELD(&(t)->t_lock))

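/*
 * Illustrative sketch (not part of this header): t_state and t_schedflag
 * are stable only while the thread is locked, so callers typically
 * evaluate these predicates under thread_lock() (declared below for
 * _KERNEL consumers), e.g.:
 *
 *	thread_lock(t);
 *	if (ISTOPPED(t) || ISWAKEABLE(t))
 *		... act on the stopped or sleeping thread ...
 *	thread_unlock(t);
 */
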
/*
 * proctot(x)
 *	convert a proc pointer to a thread pointer. this only works with
 *	procs that have only one lwp.
 *
 * proctolwp(x)
 *	convert a proc pointer to a lwp pointer. this only works with
 *	procs that have only one lwp.
 *
 * ttolwp(x)
 *	convert a thread pointer to its lwp pointer.
 *
 * ttoproc(x)
 *	convert a thread pointer to its proc pointer.
 *
 * ttoproj(x)
 *	convert a thread pointer to its project pointer.
 *
 * ttozone(x)
 *	convert a thread pointer to its zone pointer.
 *
 * lwptot(x)
 *	convert a lwp pointer to its thread pointer.
 *
 * lwptoproc(x)
 *	convert a lwp to its proc pointer.
 */
#define	proctot(x)	((x)->p_tlist)
#define	proctolwp(x)	((x)->p_tlist->t_lwp)
#define	ttolwp(x)	((x)->t_lwp)
#define	ttoproc(x)	((x)->t_procp)
#define	ttoproj(x)	((x)->t_proj)
#define	ttozone(x)	((x)->t_procp->p_zone)
#define	lwptot(x)	((x)->lwp_thread)
#define	lwptoproc(x)	((x)->lwp_procp)

#define	t_pc	t_pcb.val[0]
#define	t_sp	t_pcb.val[1]

#ifdef	_KERNEL

extern	kthread_t *threadp(void);	/* inline, returns thread pointer */
#define	curthread	(threadp())		/* current thread pointer */
#define	curproc		(ttoproc(curthread))	/* current process pointer */
#define	curproj		(ttoproj(curthread))	/* current project pointer */
#define	curzone		(curproc->p_zone)	/* current zone pointer */

extern	struct _kthread	t0;		/* the scheduler thread */
extern	kmutex_t	pidlock;	/* global process lock */

/*
 * thread_free_lock is used by the tick accounting thread to keep a thread
 * from being freed while it is being examined.
 *
 * Thread structures are 32-byte aligned structures. That is why we use the
 * following formula.
 */
#define	THREAD_FREE_BITS	10
#define	THREAD_FREE_NUM		(1 << THREAD_FREE_BITS)
#define	THREAD_FREE_MASK	(THREAD_FREE_NUM - 1)
#define	THREAD_FREE_1		PTR24_LSB
#define	THREAD_FREE_2		(PTR24_LSB + THREAD_FREE_BITS)
#define	THREAD_FREE_SHIFT(t)	\
	(((ulong_t)(t) >> THREAD_FREE_1) ^ ((ulong_t)(t) >> THREAD_FREE_2))
#define	THREAD_FREE_HASH(t)	(THREAD_FREE_SHIFT(t) & THREAD_FREE_MASK)

typedef struct thread_free_lock {
	kmutex_t	tf_lock;
	uchar_t		tf_pad[64 - sizeof (kmutex_t)];
} thread_free_lock_t;

extern void	thread_free_prevent(kthread_t *);
extern void	thread_free_allow(kthread_t *);

/*
 * Routines to change the priority and effective priority
 * of a thread-locked thread, whatever its state.
 */
extern	int	thread_change_pri(kthread_t *t, pri_t disp_pri, int front);
extern	void	thread_change_epri(kthread_t *t, pri_t disp_pri);

/*
 * Routines that manipulate the dispatcher lock for the thread.
 * The locking hierarchy is as follows:
 *	cpu_lock > sleepq locks > run queue locks
 */
void	thread_transition(kthread_t *);	/* move to transition lock */
void	thread_stop(kthread_t *);	/* move to stop lock */
void	thread_lock(kthread_t *);	/* lock thread and its queue */
void	thread_lock_high(kthread_t *);	/* lock thread and its queue */
void	thread_onproc(kthread_t *, struct cpu *); /* set onproc state lock */

#define	thread_unlock(t)		disp_lock_exit((t)->t_lockp)
#define	thread_unlock_high(t)		disp_lock_exit_high((t)->t_lockp)
#define	thread_unlock_nopreempt(t)	disp_lock_exit_nopreempt((t)->t_lockp)

#define	THREAD_LOCK_HELD(t)	(DISP_LOCK_HELD((t)->t_lockp))

extern disp_lock_t transition_lock;	/* lock protecting transiting threads */
extern disp_lock_t stop_lock;		/* lock protecting stopped threads */

caddr_t	thread_stk_init(caddr_t);	/* init thread stack */

extern int default_binding_mode;

#endif	/* _KERNEL */

/*
 * Macro to change a thread's priority.
 */
#define	THREAD_CHANGE_PRI(t, pri) {					\
	pri_t __new_pri = (pri);					\
	DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, __new_pri);	\
	(t)->t_pri = __new_pri;						\
	schedctl_set_cidpri(t);						\
}

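/*
 * Illustrative sketch (not part of this header): thread_change_pri() above
 * is documented as acting on a thread-locked thread, so a caller adjusting
 * a priority would typically look like:
 *
 *	thread_lock(t);
 *	(void) thread_change_pri(t, new_pri, 0);
 *	thread_unlock(t);
 *
 * new_pri is a hypothetical pri_t value supplied by the caller; a non-zero
 * third ("front") argument is assumed to request front-of-queue placement
 * for a runnable thread.
 */
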
/*
 * Macro to indicate that a thread's priority is about to be changed.
 */
#define	THREAD_WILLCHANGE_PRI(t, pri) {					\
	DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, (pri));	\
}

/*
 * Macros to change thread state and the associated lock.
 */
#define	THREAD_SET_STATE(tp, state, lp)		\
		((tp)->t_state = state, (tp)->t_lockp = lp)

/*
 * Point it at the transition lock, which is always held.
 * The previously held lock is dropped.
 */
#define	THREAD_TRANSITION(tp)	thread_transition(tp);
/*
 * Set the thread's lock to be the transition lock, without dropping
 * the previously held lock.
 */
#define	THREAD_TRANSITION_NOLOCK(tp)	((tp)->t_lockp = &transition_lock)

/*
 * Put thread in run state, and set the lock pointer to the dispatcher queue
 * lock pointer provided.  This lock should be held.
 */
#define	THREAD_RUN(tp, lp)	THREAD_SET_STATE(tp, TS_RUN, lp)

/*
 * Put thread in wait state, and set the lock pointer to the wait queue
 * lock pointer provided.  This lock should be held.
 */
#define	THREAD_WAIT(tp, lp)	THREAD_SET_STATE(tp, TS_WAIT, lp)

/*
 * Put thread in run state, and set the lock pointer to the dispatcher queue
 * lock pointer provided (i.e., the "swapped_lock").  This lock should be held.
 */
#define	THREAD_SWAP(tp, lp)	THREAD_SET_STATE(tp, TS_RUN, lp)

/*
 * Put the thread in zombie state and set the lock pointer to NULL.
 * The NULL will catch anything that tries to lock a zombie.
 */
#define	THREAD_ZOMB(tp)	THREAD_SET_STATE(tp, TS_ZOMB, NULL)

/*
 * Set the thread into ONPROC state, and point the lock at the CPU's
 * lock for the onproc thread(s).  This lock should be held, so the
 * thread does not become unlocked, since these stores can be reordered.
 */
#define	THREAD_ONPROC(tp, cpu)	\
		THREAD_SET_STATE(tp, TS_ONPROC, &(cpu)->cpu_thread_lock)

/*
 * Set the thread into the TS_SLEEP state, and set the lock pointer to
 * some sleep queue's lock.  The new lock should already be held.
 */
#define	THREAD_SLEEP(tp, lp)	{				\
	disp_lock_t	*tlp;					\
	tlp = (tp)->t_lockp;					\
	THREAD_SET_STATE(tp, TS_SLEEP, lp);			\
	disp_lock_exit_high(tlp);				\
}

/*
 * Interrupt threads are created in TS_FREE state, and their lock
 * points at the associated CPU's lock.
 */
#define	THREAD_FREEINTR(tp, cpu)	\
		THREAD_SET_STATE(tp, TS_FREE, &(cpu)->cpu_thread_lock)

/* if tunable kmem_stackinfo is set, fill kthread stack with a pattern */
#define	KMEM_STKINFO_PATTERN	0xbadcbadcbadcbadcULL

/*
 * If tunable kmem_stackinfo is set, log the latest KMEM_LOG_STK_USAGE_SIZE
 * dead kthreads that used their kernel stack the most.
 */
#define	KMEM_STKINFO_LOG_SIZE	16

/* kthread name (cmd/lwpid) string size in the stackinfo log */
#define	KMEM_STKINFO_STR_SIZE	64

/*
 * stackinfo logged data.
 */
typedef struct kmem_stkinfo {
	caddr_t	kthread;	/* kthread pointer */
	caddr_t	t_startpc;	/* where kthread started */
	caddr_t	start;		/* kthread stack start address */
	size_t	stksz;		/* kthread stack size */
	size_t	percent;	/* kthread stack high water mark */
	id_t	t_tid;		/* kthread id */
	char	cmd[KMEM_STKINFO_STR_SIZE];	/* kthread name (cmd/lwpid) */
} kmem_stkinfo_t;

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_THREAD_H */