/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_THREAD_H
#define	_SYS_THREAD_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/klwp.h>
#include <sys/time.h>
#include <sys/signal.h>
#include <sys/kcpc.h>
#if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL)
#include <asm/thread.h>
#endif

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * The thread object, its states, and the methods by which it
 * is accessed.
 */

/*
 * Values that t_state may assume.  Note that t_state cannot have more
 * than one of these flags set at a time.
 */
#define	TS_FREE		0x00	/* Thread at loose ends */
#define	TS_SLEEP	0x01	/* Awaiting an event */
#define	TS_RUN		0x02	/* Runnable, but not yet on a processor */
#define	TS_ONPROC	0x04	/* Thread is being run on a processor */
#define	TS_ZOMB		0x08	/* Thread has died but hasn't been reaped */
#define	TS_STOPPED	0x10	/* Stopped, initial state */
#define	TS_WAIT		0x20	/* Waiting to become runnable */

typedef struct ctxop {
	void	(*save_op)(void *);	/* function to invoke to save context */
	void	(*restore_op)(void *);	/* function to invoke to restore ctx */
	void	(*fork_op)(void *, void *);	/* invoke to fork context */
	void	(*lwp_create_op)(void *, void *);	/* lwp_create context */
	void	(*exit_op)(void *);	/* invoked during {thread,lwp}_exit() */
	void	(*free_op)(void *, int);	/* function which frees the context */
	void	*arg;		/* argument to above functions, ctx pointer */
	struct ctxop *next;	/* next context ops */
} ctxop_t;
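
/*
 * Illustrative sketch (not part of the original header): a subsystem that
 * needs per-thread save/restore hooks typically registers a ctxop_t via
 * installctx() and removes it with removectx().  Those prototypes live
 * elsewhere in the kernel; the calls below assume the conventional
 * eight-argument form and a hypothetical "xx" subsystem.
 *
 *	installctx(curthread, xx_arg,
 *	    xx_save, xx_restore, xx_fork, xx_lwp_create, xx_exit, xx_free);
 *	...
 *	(void) removectx(curthread, xx_arg,
 *	    xx_save, xx_restore, xx_fork, xx_lwp_create, xx_exit, xx_free);
 */
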
/*
 * The active file descriptor table.
 * Each member of a_fd[] not equalling -1 represents an active fd.
 * The structure is initialized on first use; all zeros means uninitialized.
 */
typedef struct _afd {
	int	*a_fd;		/* pointer to list of fds */
	short	a_nfd;		/* number of entries in *a_fd */
	short	a_stale;	/* one of the active fds is being closed */
	int	a_buf[1];	/* buffer to which a_fd initially refers */
} afd_t;

/*
 * An lwpchan provides uniqueness when sleeping on user-level
 * synchronization primitives.  The lc_wchan member is used
 * for sleeping on kernel synchronization primitives.
 */
typedef struct {
	caddr_t lc_wchan0;
	caddr_t lc_wchan;
} lwpchan_t;

typedef struct _kthread *kthread_id_t;

struct turnstile;
struct panic_trap_info;
struct upimutex;
struct kproject;
struct on_trap_data;
struct waitq;
struct _kcpc_ctx;
struct _kcpc_set;

/* Definition for kernel thread identifier type */
typedef uint64_t kt_did_t;
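
/*
 * Illustrative sketch (not part of the original header): kernel code that
 * must wait for another kernel thread to exit usually records its t_did
 * and later passes it to thread_join().  The thread_create()/thread_join()
 * prototypes are declared elsewhere; the snippet assumes their conventional
 * forms and a hypothetical worker function xx_worker().
 *
 *	kthread_t *t;
 *	kt_did_t did;
 *
 *	t = thread_create(NULL, 0, xx_worker, xx_arg, 0, &p0,
 *	    TS_RUN, minclsyspri);
 *	did = t->t_did;
 *	...
 *	thread_join(did);		wait for xx_worker to thread_exit()
 */
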
typedef struct _kthread {
	struct _kthread *t_link; /* dispq, sleepq, and free queue link */

	caddr_t	t_stk;		/* base of stack (kernel sp value to use) */
	void	(*t_startpc)(void);	/* PC where thread started */
	struct cpu *t_bound_cpu; /* cpu bound to, or NULL if not bound */
	short	t_affinitycnt;	/* nesting level of kernel affinity-setting */
	short	t_bind_cpu;	/* user-specified CPU binding (-1 if none) */
	ushort_t t_flag;	/* modified only by current thread */
	ushort_t t_proc_flag;	/* modified holding ttoproc(t)->p_lock */
	ushort_t t_schedflag;	/* modified holding thread_lock(t) */
	volatile char t_preempt;	/* don't preempt thread if set */
	volatile char t_preempt_lk;
	uint_t	t_state;	/* thread state (protected by thread_lock) */
	pri_t	t_pri;		/* assigned thread priority */
	pri_t	t_epri;		/* inherited thread priority */
	pri_t	t_cpri;		/* thread scheduling class priority */
	char	t_writer;	/* sleeping in lwp_rwlock_lock(RW_WRITE_LOCK) */
	label_t	t_pcb;		/* pcb, save area when switching */
	lwpchan_t t_lwpchan;	/* reason for blocking */
#define	t_wchan0	t_lwpchan.lc_wchan0
#define	t_wchan		t_lwpchan.lc_wchan
	struct _sobj_ops *t_sobj_ops;
	id_t	t_cid;		/* scheduling class id */
	struct thread_ops *t_clfuncs;	/* scheduling class ops vector */
	void	*t_cldata;	/* per scheduling class specific data */
	ctxop_t	*t_ctx;		/* thread context */
	uintptr_t t_lofault;	/* ret pc for failed page faults */
	label_t	*t_onfault;	/* on_fault() setjmp buf */
	struct on_trap_data *t_ontrap;	/* on_trap() protection data */
	caddr_t t_swap;		/* swappable thread storage */
	lock_t	t_lock;		/* used to resume() a thread */
	uint8_t	t_lockstat;	/* set while thread is in lockstat code */
	uint8_t	t_pil;		/* interrupt thread PIL */
	disp_lock_t	t_pi_lock;	/* lock protecting t_prioinv list */
	char	t_nomigrate;	/* do not migrate if set */
	struct cpu	*t_cpu;	/* CPU that thread last ran on */
	struct cpu	*t_weakbound_cpu;	/* cpu weakly bound to */
	struct lgrp_ld	*t_lpl;	/* load average for home lgroup */
	void		*t_lgrp_reserv[2];	/* reserved for future */
	struct _kthread	*t_intr; /* interrupted (pinned) thread */
	uint64_t	t_intr_start;	/* timestamp when time slice began */
	kt_did_t	t_did;	/* thread id for kernel debuggers */
	caddr_t t_tnf_tpdp;	/* Trace facility data pointer */
	struct _kcpc_ctx *t_cpc_ctx;	/* performance counter context */
	struct _kcpc_set *t_cpc_set;	/* set this thread has bound */

	/*
	 * non swappable part of the lwp state.
	 */
	id_t		t_tid;		/* lwp's id */
	id_t		t_waitfor;	/* target lwp id in lwp_wait() */
	struct sigqueue	*t_sigqueue;	/* queue of siginfo structs */
	k_sigset_t	t_sig;		/* signals pending to this process */
	k_sigset_t	t_extsig;	/* signals sent from another contract */
	k_sigset_t	t_hold;		/* hold signal bit mask */
	struct	_kthread *t_forw;	/* process's forward thread link */
	struct	_kthread *t_back;	/* process's backward thread link */
	struct	_kthread *t_thlink;	/* tid (lwpid) lookup hash link */
	klwp_t	*t_lwp;			/* thread's lwp pointer */
	struct	proc	*t_procp;	/* proc pointer */
	struct	t_audit_data	*t_audit_data;	/* per thread audit data */
	struct	_kthread	*t_next;	/* doubly linked list of all threads */
	struct	_kthread	*t_prev;
	ushort_t t_whystop;		/* reason for stopping */
	ushort_t t_whatstop;		/* more detailed reason */
	int	t_dslot;		/* index in proc's thread directory */
	struct pollstate *t_pollstate;	/* state used during poll(2) */
	struct pollcache *t_pollcache;	/* to pass a pcache ptr by /dev/poll */
	struct cred	*t_cred;	/* pointer to current cred */
	time_t	t_start;		/* start time, seconds since epoch */
	clock_t	t_lbolt;		/* lbolt at last clock_tick() */
	hrtime_t t_stoptime;		/* timestamp at stop() */
	uint_t	t_pctcpu;		/* %cpu at last clock_tick(), binary */
					/* point at right of high-order bit */
	short	t_sysnum;		/* system call number */
	kcondvar_t	t_delay_cv;
	kmutex_t	t_delay_lock;

	/*
	 * Pointer to the dispatcher lock protecting t_state and state-related
	 * flags.  This pointer can change during waits on the lock, so
	 * it should be grabbed only by thread_lock().
	 */
	disp_lock_t	*t_lockp;	/* pointer to the dispatcher lock */
	ushort_t t_oldspl;		/* spl level before dispatcher locked */
	volatile char	t_pre_sys;	/* pre-syscall work needed */
	lock_t	t_lock_flush;		/* for lock_mutex_flush() impl */
	struct _disp	*t_disp_queue;	/* run queue for chosen CPU */
	clock_t	t_disp_time;		/* last time this thread was running */
	uint_t	t_kpri_req;		/* kernel priority required */

	/*
	 * Post-syscall / post-trap flags.
	 *	No lock is required to set these.
	 *	These must be cleared only by the thread itself.
	 *
	 *	t_astflag indicates that some post-trap processing is required,
	 *		possibly a signal or a preemption.  The thread will not
	 *		return to user with this set.
	 *	t_post_sys indicates that some unusual post-system call
	 *		handling is required, such as an error or tracing.
	 *	t_sig_check indicates that some condition in ISSIG() must be
	 *		checked, but doesn't prevent returning to user.
	 *	t_post_sys_ast is a way of checking whether any of these three
	 *		flags are set.
	 */
	union __tu {
		struct __ts {
			volatile char	_t_astflag;	/* AST requested */
			volatile char	_t_sig_check;	/* ISSIG required */
			volatile char	_t_post_sys;	/* post_syscall req */
			volatile char	_t_trapret;	/* call CL_TRAPRET */
		} _ts;
		volatile int	_t_post_sys_ast;	/* OR of these flags */
	} _tu;
#define	t_astflag	_tu._ts._t_astflag
#define	t_sig_check	_tu._ts._t_sig_check
#define	t_post_sys	_tu._ts._t_post_sys
#define	t_trapret	_tu._ts._t_trapret
#define	t_post_sys_ast	_tu._t_post_sys_ast

	/*
	 * Real time microstate profiling.
	 */
	/* possible 4-byte filler */
	hrtime_t t_waitrq;	/* timestamp for run queue wait time */
	int	t_mstate;	/* current microstate */
	struct rprof {
		int	rp_anystate;		/* set if any state non-zero */
		uint_t	rp_state[NMSTATES];	/* mstate profiling counts */
	} *t_rprof;

	/*
	 * There is a turnstile inserted into the list below for
	 * every priority inverted synchronization object that
	 * this thread holds.
	 */

	struct turnstile *t_prioinv;

	/*
	 * Pointer to the turnstile attached to the synchronization
	 * object where this thread is blocked.
	 */

	struct turnstile *t_ts;

	/*
	 * kernel thread specific data
	 *	Borrowed from userland implementation of POSIX tsd
	 */
	struct tsd_thread {
		struct tsd_thread *ts_next;	/* threads with TSD */
		struct tsd_thread *ts_prev;	/* threads with TSD */
		uint_t	ts_nkeys;		/* entries in value array */
		void	**ts_value;		/* array of value/key */
	} *t_tsd;

	clock_t		t_stime;	/* time stamp used by the swapper */
	struct door_data *t_door;	/* door invocation data */
	kmutex_t	*t_plockp;	/* pointer to process's p_lock */

	struct sc_shared *t_schedctl;	/* scheduler activations shared data */
	uintptr_t	t_sc_uaddr;	/* user-level address of shared data */

	struct cpupart	*t_cpupart;	/* partition containing thread */
	int		t_bind_pset;	/* processor set binding */

	struct copyops	*t_copyops;	/* copy in/out ops vector */

	caddr_t		t_stkbase;	/* base of the stack */
	struct page	*t_red_pp;	/* if non-NULL, redzone is mapped */

	struct _afd	t_activefd;	/* active file descriptor table */

	struct _kthread	*t_priforw;	/* sleepq per-priority sublist */
	struct _kthread	*t_priback;

	struct sleepq	*t_sleepq;	/* sleep queue thread is waiting on */
	struct panic_trap_info *t_panic_trap;	/* saved data from fatal trap */
	int		*t_lgrp_affinity;	/* lgroup affinity */
	struct upimutex	*t_upimutex;	/* list of upimutexes owned by thread */
	uint32_t	t_nupinest;	/* number of nested held upi mutexes */
	struct kproject *t_proj;	/* project containing this thread */
	uint8_t		t_unpark;	/* modified holding t_delay_lock */
	uint8_t		t_release;	/* lwp_release() woke up the thread */
	uint8_t		t_hatdepth;	/* depth of recursive hat_memloads */
	uint8_t		t_xpvcntr;	/* see xen_block_migrate() */
	kcondvar_t	t_joincv;	/* cv used to wait for thread exit */
	void		*t_taskq;	/* for threads belonging to taskq */
	hrtime_t	t_anttime;	/* most recent time anticipatory load */
					/* was added to an lgroup's load */
					/* on this thread's behalf */
	char		*t_pdmsg;	/* privilege debugging message */

	uint_t		t_predcache;	/* DTrace predicate cache */
	hrtime_t	t_dtrace_vtime;	/* DTrace virtual time */
	hrtime_t	t_dtrace_start;	/* DTrace slice start time */

	uint8_t		t_dtrace_stop;	/* indicates a DTrace-desired stop */
	uint8_t		t_dtrace_sig;	/* signal sent via DTrace's raise() */

	union __tdu {
		struct __tds {
			uint8_t	_t_dtrace_on;	/* hit a fasttrap tracepoint */
			uint8_t	_t_dtrace_step;	/* about to return to kernel */
			uint8_t	_t_dtrace_ret;	/* handling a return probe */
			uint8_t	_t_dtrace_ast;	/* saved ast flag */
#ifdef __amd64
			uint8_t	_t_dtrace_reg;	/* modified register */
#endif
		} _tds;
		ulong_t	_t_dtrace_ft;	/* bitwise or of these flags */
	} _tdu;
#define	t_dtrace_ft	_tdu._t_dtrace_ft
#define	t_dtrace_on	_tdu._tds._t_dtrace_on
#define	t_dtrace_step	_tdu._tds._t_dtrace_step
#define	t_dtrace_ret	_tdu._tds._t_dtrace_ret
#define	t_dtrace_ast	_tdu._tds._t_dtrace_ast
#ifdef __amd64
#define	t_dtrace_reg	_tdu._tds._t_dtrace_reg
#endif

	uintptr_t	t_dtrace_pc;	/* DTrace saved pc from fasttrap */
	uintptr_t	t_dtrace_npc;	/* DTrace next pc from fasttrap */
	uintptr_t	t_dtrace_scrpc;	/* DTrace per-thread scratch location */
	uintptr_t	t_dtrace_astpc;	/* DTrace return sequence location */
#ifdef __amd64
	uint64_t	t_dtrace_regv;	/* DTrace saved reg from fasttrap */
#endif
	hrtime_t	t_hrtime;	/* high-res last time on cpu */
	kmutex_t	t_ctx_lock;	/* protects t_ctx in removectx() */
	struct waitq	*t_waitq;	/* wait queue */
} kthread_t;
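
/*
 * Illustrative sketch (not part of the original header): all threads of a
 * process hang off p->p_tlist in a circular list linked by t_forw/t_back,
 * so a per-process walk typically looks like the loop below, with the
 * process's p_lock held to keep the list stable.
 *
 *	kthread_t *t;
 *
 *	mutex_enter(&p->p_lock);
 *	t = p->p_tlist;
 *	do {
 *		...			examine or flag the thread
 *	} while ((t = t->t_forw) != p->p_tlist);
 *	mutex_exit(&p->p_lock);
 */
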
/*
 * Thread flag (t_flag) definitions.
 *	These flags must be changed only for the current thread,
 *	and not during preemption code, since the code being
 *	preempted could be modifying the flags.
 *
 *	For the most part these flags do not need locking.
 *	The following flags will only be changed while the thread_lock is held,
 *	to give assurance that they are consistent with t_state:
 *		T_WAKEABLE
 */
#define	T_INTR_THREAD	0x0001	/* thread is an interrupt thread */
#define	T_WAKEABLE	0x0002	/* thread is blocked, signals enabled */
#define	T_TOMASK	0x0004	/* use lwp_sigoldmask on return from signal */
#define	T_TALLOCSTK	0x0008	/* thread structure allocated from stk */
#define	T_FORKALL	0x0010	/* thread was cloned by forkall() */
#define	T_WOULDBLOCK	0x0020	/* for lockfs */
#define	T_DONTBLOCK	0x0040	/* for lockfs */
#define	T_DONTPEND	0x0080	/* for lockfs */
#define	T_SYS_PROF	0x0100	/* profiling on for duration of system call */
#define	T_WAITCVSEM	0x0200	/* waiting for a lwp_cv or lwp_sema on sleepq */
#define	T_WATCHPT	0x0400	/* thread undergoing a watchpoint emulation */
#define	T_PANIC		0x0800	/* thread initiated a system panic */
#define	T_DFLTSTK	0x1000	/* stack is default size */
#define	T_CAPTURING	0x2000	/* thread is in page capture logic */
#define	T_VFPARENT	0x4000	/* thread is vfork parent, must call vfwait */
#define	T_DONTDTRACE	0x8000	/* disable DTrace probes */

/*
 * Flags in t_proc_flag.
 *	These flags must be modified only when holding the
 *	p_lock for the associated process.
 */
#define	TP_DAEMON	0x0001	/* this is an LWP_DAEMON lwp */
#define	TP_HOLDLWP	0x0002	/* hold thread's lwp */
#define	TP_TWAIT	0x0004	/* wait to be freed by lwp_wait() */
#define	TP_LWPEXIT	0x0008	/* lwp has exited */
#define	TP_PRSTOP	0x0010	/* thread is being stopped via /proc */
#define	TP_CHKPT	0x0020	/* thread is being stopped via CPR checkpoint */
#define	TP_EXITLWP	0x0040	/* terminate this lwp */
#define	TP_PRVSTOP	0x0080	/* thread is virtually stopped via /proc */
#define	TP_MSACCT	0x0100	/* collect micro-state accounting information */
#define	TP_STOPPING	0x0200	/* thread is executing stop() */
#define	TP_WATCHPT	0x0400	/* process has watchpoints in effect */
#define	TP_PAUSE	0x0800	/* process is being stopped via pauselwps() */
#define	TP_CHANGEBIND	0x1000	/* thread has a new cpu/cpupart binding */
#define	TP_ZTHREAD	0x2000	/* this is a kernel thread for a zone */
#define	TP_WATCHSTOP	0x4000	/* thread is stopping via holdwatch() */

/*
 * Thread scheduler flag (t_schedflag) definitions.
 *	The thread must be locked via thread_lock() or equiv. to change these.
 */
#define	TS_LOAD		0x0001	/* thread is in memory */
#define	TS_DONT_SWAP	0x0002	/* thread/lwp should not be swapped */
#define	TS_SWAPENQ	0x0004	/* swap thread when it reaches a safe point */
#define	TS_ON_SWAPQ	0x0008	/* thread is on the swap queue */
#define	TS_SIGNALLED	0x0010	/* thread was awakened by cv_signal() */
#define	TS_PROJWAITQ	0x0020	/* thread is on its project's waitq */
#define	TS_ZONEWAITQ	0x0040	/* thread is on its zone's waitq */
#define	TS_CSTART	0x0100	/* setrun() by continuelwps() */
#define	TS_UNPAUSE	0x0200	/* setrun() by unpauselwps() */
#define	TS_XSTART	0x0400	/* setrun() by SIGCONT */
#define	TS_PSTART	0x0800	/* setrun() by /proc */
#define	TS_RESUME	0x1000	/* setrun() by CPR resume process */
#define	TS_CREATE	0x2000	/* setrun() by syslwp_create() */
#define	TS_RUNQMATCH	0x4000	/* exact run queue balancing by setbackdq() */
#define	TS_ALLSTART	\
	(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
#define	TS_ANYWAITQ	(TS_PROJWAITQ|TS_ZONEWAITQ)

/*
 * No locking needed for AST field.
 */
#define	aston(t)		((t)->t_astflag = 1)
#define	astoff(t)		((t)->t_astflag = 0)

/* True if thread is stopped on an event of interest */
#define	ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
			!((t)->t_schedflag & TS_PSTART))

/* True if thread is asleep and wakeable */
#define	ISWAKEABLE(t) (((t)->t_state == TS_SLEEP && \
			((t)->t_flag & T_WAKEABLE)))

/* True if thread is on the wait queue */
#define	ISWAITING(t) ((t)->t_state == TS_WAIT)

/* similar to ISTOPPED except the event of interest is CPR */
#define	CPR_ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
			!((t)->t_schedflag & TS_RESUME))

/*
 * True if thread is virtually stopped (is or was asleep in
 * one of the lwp_*() system calls and marked to stop by /proc.)
 */
#define	VSTOPPED(t)	((t)->t_proc_flag & TP_PRVSTOP)

/* similar to VSTOPPED except the point of interest is CPR */
#define	CPR_VSTOPPED(t)				\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan0 != NULL &&		\
	((t)->t_flag & T_WAKEABLE) &&		\
	((t)->t_proc_flag & TP_CHKPT))

/* True if thread has been stopped by hold*() or was created stopped */
#define	SUSPENDED(t) ((t)->t_state == TS_STOPPED && \
	((t)->t_schedflag & (TS_CSTART|TS_UNPAUSE)) != (TS_CSTART|TS_UNPAUSE))

/* True if thread possesses an inherited priority */
#define	INHERITED(t)	((t)->t_epri != 0)

/* The dispatch priority of a thread */
#define	DISP_PRIO(t) ((t)->t_epri > (t)->t_pri ? (t)->t_epri : (t)->t_pri)

/* The assigned priority of a thread */
#define	ASSIGNED_PRIO(t)	((t)->t_pri)
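
/*
 * Illustrative sketch (not part of the original header): /proc-style code
 * typically evaluates the stop-state predicates above with the thread
 * locked, so that t_state and t_schedflag cannot change underneath it, e.g.
 *
 *	thread_lock(t);
 *	if (ISTOPPED(t) || VSTOPPED(t)) {
 *		...			thread is stopped for our purposes
 *	}
 *	thread_unlock(t);
 */
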
/*
 * Macros to determine whether a thread can be swapped.
 * If t_lock is held, the thread is either on a processor or being swapped.
 */
#define	SWAP_OK(t)	(!LOCK_HELD(&(t)->t_lock))

/*
 * proctot(x)
 *	convert a proc pointer to a thread pointer. this only works with
 *	procs that have only one lwp.
 *
 * proctolwp(x)
 *	convert a proc pointer to a lwp pointer. this only works with
 *	procs that have only one lwp.
 *
 * ttolwp(x)
 *	convert a thread pointer to its lwp pointer.
 *
 * ttoproc(x)
 *	convert a thread pointer to its proc pointer.
 *
 * ttoproj(x)
 *	convert a thread pointer to its project pointer.
 *
 * ttozone(x)
 *	convert a thread pointer to its zone pointer.
 *
 * lwptot(x)
 *	convert a lwp pointer to its thread pointer.
 *
 * lwptoproc(x)
 *	convert a lwp to its proc pointer.
 */
#define	proctot(x)	((x)->p_tlist)
#define	proctolwp(x)	((x)->p_tlist->t_lwp)
#define	ttolwp(x)	((x)->t_lwp)
#define	ttoproc(x)	((x)->t_procp)
#define	ttoproj(x)	((x)->t_proj)
#define	ttozone(x)	((x)->t_procp->p_zone)
#define	lwptot(x)	((x)->lwp_thread)
#define	lwptoproc(x)	((x)->lwp_procp)

#define	t_pc	t_pcb.val[0]
#define	t_sp	t_pcb.val[1]

#ifdef	_KERNEL

extern	kthread_t	*threadp(void);	/* inline, returns thread pointer */
#define	curthread	(threadp())		/* current thread pointer */
#define	curproc		(ttoproc(curthread))	/* current process pointer */
#define	curproj		(ttoproj(curthread))	/* current project pointer */
#define	curzone		(curproc->p_zone)	/* current zone pointer */

extern	struct _kthread	t0;		/* the scheduler thread */
extern	kmutex_t	pidlock;	/* global process lock */

/*
 * thread_free_lock is used by the tick accounting thread to keep a thread
 * from being freed while it is being examined.
 */
#define	THREAD_FREE_NUM		1024
#define	THREAD_FREE_MASK	(THREAD_FREE_NUM - 1)
#define	THREAD_FREE_SHIFT_BITS	5
#define	THREAD_FREE_SHIFT(t)	((uintptr_t)t >> THREAD_FREE_SHIFT_BITS)
#define	THREAD_FREE_HASH(t)	(THREAD_FREE_SHIFT(t) & THREAD_FREE_MASK)

typedef struct thread_free_lock {
	kmutex_t	tf_lock;
	uchar_t		tf_pad[64 - sizeof (kmutex_t)];
} thread_free_lock_t;

extern void	thread_free_prevent(kthread_t *);
extern void	thread_free_allow(kthread_t *);

/*
 * Routines to change the priority and effective priority
 * of a thread-locked thread, whatever its state.
 */
extern	int	thread_change_pri(kthread_t *t, pri_t disp_pri, int front);
extern	void	thread_change_epri(kthread_t *t, pri_t disp_pri);

/*
 * Routines that manipulate the dispatcher lock for the thread.
 * The locking hierarchy is as follows:
 *	cpu_lock > sleepq locks > run queue locks
 */
void	thread_transition(kthread_t *);	/* move to transition lock */
void	thread_stop(kthread_t *);	/* move to stop lock */
void	thread_lock(kthread_t *);	/* lock thread and its queue */
void	thread_lock_high(kthread_t *);	/* lock thread and its queue */
void	thread_onproc(kthread_t *, struct cpu *); /* set onproc state lock */

#define	thread_unlock(t)		disp_lock_exit((t)->t_lockp)
#define	thread_unlock_high(t)		disp_lock_exit_high((t)->t_lockp)
#define	thread_unlock_nopreempt(t)	disp_lock_exit_nopreempt((t)->t_lockp)

#define	THREAD_LOCK_HELD(t)	(DISP_LOCK_HELD((t)->t_lockp))
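
/*
 * Illustrative sketch (not part of the original header): because t_lockp can
 * change while a thread moves between queues, callers go through
 * thread_lock()/thread_unlock() rather than grabbing a dispatcher lock
 * directly, e.g. to change a thread's priority whatever its state:
 *
 *	thread_lock(t);
 *	ASSERT(THREAD_LOCK_HELD(t));
 *	(void) thread_change_pri(t, new_pri, 0);
 *	thread_unlock(t);
 */
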
extern disp_lock_t transition_lock;	/* lock protecting transiting threads */
extern disp_lock_t stop_lock;		/* lock protecting stopped threads */

caddr_t	thread_stk_init(caddr_t);	/* init thread stack */

#endif	/* _KERNEL */

/*
 * Macros to indicate that the thread holds resources that could be critical
 * to other kernel threads, so this thread needs to have kernel priority
 * if it blocks or is preempted.  Note that this is not necessary if the
 * resource is a mutex or a writer lock because of priority inheritance.
 *
 * The only way one thread may legally manipulate another thread's t_kpri_req
 * is to hold the target thread's thread lock while that thread is asleep.
 * (The rwlock code does this to implement direct handoff to waiting readers.)
 */
#define	THREAD_KPRI_REQUEST()	(curthread->t_kpri_req++)
#define	THREAD_KPRI_RELEASE()	(curthread->t_kpri_req--)
#define	THREAD_KPRI_RELEASE_N(n) (curthread->t_kpri_req -= (n))

/*
 * Macro to change a thread's priority.
 */
#define	THREAD_CHANGE_PRI(t, pri) {					\
	pri_t __new_pri = (pri);					\
	DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, __new_pri);   \
	(t)->t_pri = __new_pri;						\
	schedctl_set_cidpri(t);						\
}

/*
 * Macro to indicate that a thread's priority is about to be changed.
 */
#define	THREAD_WILLCHANGE_PRI(t, pri) {					\
	DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, (pri));	\
}

/*
 * Macros to change thread state and the associated lock.
 */
#define	THREAD_SET_STATE(tp, state, lp)		\
		((tp)->t_state = state, (tp)->t_lockp = lp)

/*
 * Point it at the transition lock, which is always held.
 * The previously held lock is dropped.
 */
#define	THREAD_TRANSITION(tp)	thread_transition(tp);
/*
 * Set the thread's lock to be the transition lock, without dropping
 * the previously held lock.
 */
#define	THREAD_TRANSITION_NOLOCK(tp)	((tp)->t_lockp = &transition_lock)

/*
 * Put thread in run state, and set the lock pointer to the dispatcher queue
 * lock pointer provided.  This lock should be held.
 */
#define	THREAD_RUN(tp, lp)	THREAD_SET_STATE(tp, TS_RUN, lp)

/*
 * Put thread in wait state, and set the lock pointer to the wait queue
 * lock pointer provided.  This lock should be held.
 */
#define	THREAD_WAIT(tp, lp)	THREAD_SET_STATE(tp, TS_WAIT, lp)

/*
 * Put thread in run state, and set the lock pointer to the dispatcher queue
 * lock pointer provided (i.e., the "swapped_lock").  This lock should be held.
 */
#define	THREAD_SWAP(tp, lp)	THREAD_SET_STATE(tp, TS_RUN, lp)

/*
 * Put the thread in zombie state and set the lock pointer to NULL.
 * The NULL will catch anything that tries to lock a zombie.
 */
#define	THREAD_ZOMB(tp)		THREAD_SET_STATE(tp, TS_ZOMB, NULL)

/*
 * Set the thread into ONPROC state, and point the lock at the CPU's
 * lock for the onproc thread(s).  This lock should be held, so the
 * thread does not become unlocked, since these stores can be reordered.
 */
#define	THREAD_ONPROC(tp, cpu)	\
		THREAD_SET_STATE(tp, TS_ONPROC, &(cpu)->cpu_thread_lock)

/*
 * Set the thread into the TS_SLEEP state, and set the lock pointer to
 * some sleep queue's lock.  The new lock should already be held.
 */
#define	THREAD_SLEEP(tp, lp)	{				\
	disp_lock_t	*tlp;					\
	tlp = (tp)->t_lockp;					\
	THREAD_SET_STATE(tp, TS_SLEEP, lp);			\
	disp_lock_exit_high(tlp);				\
}

/*
 * Interrupt threads are created in TS_FREE state, and their lock
 * points at the associated CPU's lock.
 */
#define	THREAD_FREEINTR(tp, cpu)	\
		THREAD_SET_STATE(tp, TS_FREE, &(cpu)->cpu_thread_lock)

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_THREAD_H */