/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_THREAD_H
#define	_SYS_THREAD_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/klwp.h>
#include <sys/time.h>
#include <sys/signal.h>
#include <sys/kcpc.h>
#if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL)
#include <asm/thread.h>
#endif

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * The thread object, its states, and the methods by which it
 * is accessed.
 */

/*
 * Values that t_state may assume.  Note that t_state cannot have more
 * than one of these flags set at a time.
 */
#define	TS_FREE		0x00	/* Thread at loose ends */
#define	TS_SLEEP	0x01	/* Awaiting an event */
#define	TS_RUN		0x02	/* Runnable, but not yet on a processor */
#define	TS_ONPROC	0x04	/* Thread is being run on a processor */
#define	TS_ZOMB		0x08	/* Thread has died but hasn't been reaped */
#define	TS_STOPPED	0x10	/* Stopped, initial state */
#define	TS_WAIT		0x20	/* Waiting to become runnable */

typedef struct ctxop {
	void	(*save_op)(void *);	/* function to invoke to save context */
	void	(*restore_op)(void *);	/* function to invoke to restore ctx */
	void	(*fork_op)(void *, void *);	/* invoke to fork context */
	void	(*lwp_create_op)(void *, void *);	/* lwp_create context */
	void	(*exit_op)(void *);	/* invoked during {thread,lwp}_exit() */
	void	(*free_op)(void *, int);	/* function which frees the context */
	void	*arg;		/* argument to above functions, ctx pointer */
	struct ctxop *next;	/* next context ops */
} ctxop_t;
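/*
 * Illustrative sketch (not part of the original header): a subsystem that
 * maintains per-thread context typically registers a ctxop_t through
 * installctx() and removes it with removectx(); the handler names below
 * are hypothetical placeholders.
 *
 *	installctx(curthread, my_ctx, my_save, my_restore,
 *	    my_fork, my_lwp_create, my_exit, my_free);
 *	...
 *	(void) removectx(curthread, my_ctx, my_save, my_restore,
 *	    my_fork, my_lwp_create, my_exit, my_free);
 */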
/*
 * The active file descriptor table.
 * Each member of a_fd[] not equalling -1 represents an active fd.
 * The structure is initialized on first use; all zeros means uninitialized.
 */
typedef struct _afd {
	int	*a_fd;		/* pointer to list of fds */
	short	a_nfd;		/* number of entries in *a_fd */
	short	a_stale;	/* one of the active fds is being closed */
	int	a_buf[1];	/* buffer to which a_fd initially refers */
} afd_t;

/*
 * An lwpchan provides uniqueness when sleeping on user-level
 * synchronization primitives.  The lc_wchan member is used
 * for sleeping on kernel synchronization primitives.
 */
typedef struct {
	caddr_t lc_wchan0;
	caddr_t lc_wchan;
} lwpchan_t;
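/*
 * Reading aid (an inference from how these fields are used later in this
 * file, not text from the original header): a sleep on a kernel
 * synchronization object records only lc_wchan (t_wchan), while a sleep
 * on a user-level lwpchan also fills in lc_wchan0, which is why tests
 * such as CPR_VSTOPPED below use t_wchan0 to recognize lwp_*() sleeps:
 *
 *	if (t->t_state == TS_SLEEP && t->t_wchan0 != NULL)
 *		... blocked on a user-level synchronization object ...
 */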
typedef struct _kthread *kthread_id_t;

struct turnstile;
struct panic_trap_info;
struct upimutex;
struct kproject;
struct on_trap_data;
struct waitq;

/* Definition for kernel thread identifier type */
typedef uint64_t kt_did_t;

typedef struct _kthread {
	struct _kthread *t_link;	/* dispq, sleepq, and free queue link */

	caddr_t	t_stk;		/* base of stack (kernel sp value to use) */
	void	(*t_startpc)(void);	/* PC where thread started */
	struct cpu *t_bound_cpu;	/* cpu bound to, or NULL if not bound */
	short	t_affinitycnt;	/* nesting level of kernel affinity-setting */
	short	t_bind_cpu;	/* user-specified CPU binding (-1 if none) */
	ushort_t t_flag;	/* modified only by current thread */
	ushort_t t_proc_flag;	/* modified holding ttoproc(t)->p_lock */
	ushort_t t_schedflag;	/* modified holding thread_lock(t) */
	volatile char t_preempt;	/* don't preempt thread if set */
	volatile char t_preempt_lk;
	uint_t	t_state;	/* thread state (protected by thread_lock) */
	pri_t	t_pri;		/* assigned thread priority */
	pri_t	t_epri;		/* inherited thread priority */
	pri_t	t_cpri;		/* thread scheduling class priority */
	char	t_writer;	/* sleeping in lwp_rwlock_lock(RW_WRITE_LOCK) */
	label_t	t_pcb;		/* pcb, save area when switching */
	lwpchan_t t_lwpchan;	/* reason for blocking */
#define	t_wchan0	t_lwpchan.lc_wchan0
#define	t_wchan		t_lwpchan.lc_wchan
	struct _sobj_ops *t_sobj_ops;
	id_t	t_cid;		/* scheduling class id */
	struct thread_ops *t_clfuncs;	/* scheduling class ops vector */
	void	*t_cldata;	/* per scheduling class specific data */
	ctxop_t	*t_ctx;		/* thread context */
	uintptr_t t_lofault;	/* ret pc for failed page faults */
	label_t	*t_onfault;	/* on_fault() setjmp buf */
	struct on_trap_data *t_ontrap;	/* on_trap() protection data */
	caddr_t	t_swap;		/* swappable thread storage */
	lock_t	t_lock;		/* used to resume() a thread */
	uint8_t	t_lockstat;	/* set while thread is in lockstat code */
	uint8_t	t_pil;		/* interrupt thread PIL */
	disp_lock_t t_pi_lock;	/* lock protecting t_prioinv list */
	char	t_nomigrate;	/* do not migrate if set */
	struct cpu *t_cpu;	/* CPU that thread last ran on */
	struct cpu *t_weakbound_cpu;	/* cpu weakly bound to */
	struct lgrp_ld *t_lpl;	/* load average for home lgroup */
	void	*t_lgrp_reserv[2];	/* reserved for future */
	struct _kthread *t_intr;	/* interrupted (pinned) thread */
	uint64_t t_intr_start;	/* timestamp when time slice began */
	kt_did_t t_did;		/* thread id for kernel debuggers */
	caddr_t	t_tnf_tpdp;	/* Trace facility data pointer */
	kcpc_ctx_t *t_cpc_ctx;	/* performance counter context */
	kcpc_set_t *t_cpc_set;	/* set this thread has bound */

	/*
	 * Non-swappable part of the lwp state.
	 */
	id_t	t_tid;		/* lwp's id */
	id_t	t_waitfor;	/* target lwp id in lwp_wait() */
	struct sigqueue *t_sigqueue;	/* queue of siginfo structs */
	k_sigset_t t_sig;	/* signals pending to this process */
	k_sigset_t t_extsig;	/* signals sent from another contract */
	k_sigset_t t_hold;	/* hold signal bit mask */
	struct _kthread *t_forw;	/* process's forward thread link */
	struct _kthread *t_back;	/* process's backward thread link */
	struct _kthread *t_thlink;	/* tid (lwpid) lookup hash link */
	klwp_t	*t_lwp;		/* thread's lwp pointer */
	struct proc *t_procp;	/* proc pointer */
	struct t_audit_data *t_audit_data;	/* per thread audit data */
	struct _kthread *t_next;	/* doubly linked list of all threads */
	struct _kthread *t_prev;
	ushort_t t_whystop;	/* reason for stopping */
	ushort_t t_whatstop;	/* more detailed reason */
	int	t_dslot;	/* index in proc's thread directory */
	struct pollstate *t_pollstate;	/* state used during poll(2) */
	struct pollcache *t_pollcache;	/* to pass a pcache ptr by /dev/poll */
	struct cred *t_cred;	/* pointer to current cred */
	time_t	t_start;	/* start time, seconds since epoch */
	clock_t	t_lbolt;	/* lbolt at last clock_tick() */
	hrtime_t t_stoptime;	/* timestamp at stop() */
	uint_t	t_pctcpu;	/* %cpu at last clock_tick(), binary */
				/* point at right of high-order bit */
	short	t_sysnum;	/* system call number */
	kcondvar_t t_delay_cv;
	kmutex_t t_delay_lock;

	/*
	 * Pointer to the dispatcher lock protecting t_state and state-related
	 * flags.  This pointer can change during waits on the lock, so
	 * it should be grabbed only by thread_lock().
	 */
	disp_lock_t *t_lockp;	/* pointer to the dispatcher lock */
	ushort_t t_oldspl;	/* spl level before dispatcher locked */
	volatile char t_pre_sys;	/* pre-syscall work needed */
	lock_t	t_lock_flush;	/* for lock_mutex_flush() impl */
	struct _disp *t_disp_queue;	/* run queue for chosen CPU */
	clock_t	t_disp_time;	/* last time this thread was running */
	uint_t	t_kpri_req;	/* kernel priority required */

	/*
	 * Post-syscall / post-trap flags.
	 *	No lock is required to set these.
	 *	These must be cleared only by the thread itself.
	 *
	 *	t_astflag indicates that some post-trap processing is required,
	 *		possibly a signal or a preemption.  The thread will not
	 *		return to user with this set.
	 *	t_post_sys indicates that some unusual post-system call
	 *		handling is required, such as an error or tracing.
	 *	t_sig_check indicates that some condition in ISSIG() must be
	 *		checked, but doesn't prevent returning to user.
	 *	t_post_sys_ast is a way of checking whether any of these three
	 *		flags are set.
	 */
	union __tu {
		struct __ts {
			volatile char	_t_astflag;	/* AST requested */
			volatile char	_t_sig_check;	/* ISSIG required */
			volatile char	_t_post_sys;	/* post_syscall req */
			volatile char	_t_trapret;	/* call CL_TRAPRET */
		} _ts;
		volatile int	_t_post_sys_ast;	/* OR of these flags */
	} _tu;
#define	t_astflag	_tu._ts._t_astflag
#define	t_sig_check	_tu._ts._t_sig_check
#define	t_post_sys	_tu._ts._t_post_sys
#define	t_trapret	_tu._ts._t_trapret
#define	t_post_sys_ast	_tu._t_post_sys_ast
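	/*
	 * Illustrative sketch (not from the original header): because the
	 * four flags above share storage with _t_post_sys_ast, the
	 * trap/syscall return path can test all of them with a single
	 * load; the call below is only a hypothetical outline of such a
	 * check.
	 *
	 *	if (curthread->t_post_sys_ast)
	 *		... do the post-syscall/post-trap work ...
	 */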
	/*
	 * Real time microstate profiling.
	 */
				/* possible 4-byte filler */
	hrtime_t t_waitrq;	/* timestamp for run queue wait time */
	int	t_mstate;	/* current microstate */
	struct rprof {
		int	rp_anystate;		/* set if any state non-zero */
		uint_t	rp_state[NMSTATES];	/* mstate profiling counts */
	} *t_rprof;

	/*
	 * There is a turnstile inserted into the list below for
	 * every priority inverted synchronization object that
	 * this thread holds.
	 */

	struct turnstile *t_prioinv;

	/*
	 * Pointer to the turnstile attached to the synchronization
	 * object where this thread is blocked.
	 */

	struct turnstile *t_ts;

	/*
	 * kernel thread specific data
	 *	Borrowed from userland implementation of POSIX tsd
	 */
	struct tsd_thread {
		struct tsd_thread *ts_next;	/* threads with TSD */
		struct tsd_thread *ts_prev;	/* threads with TSD */
		uint_t	ts_nkeys;		/* entries in value array */
		void	**ts_value;		/* array of value/key */
	} *t_tsd;

	clock_t	t_stime;	/* time stamp used by the swapper */
	struct door_data *t_door;	/* door invocation data */
	kmutex_t *t_plockp;	/* pointer to process's p_lock */

	struct sc_shared *t_schedctl;	/* scheduler activations shared data */
	uintptr_t t_sc_uaddr;	/* user-level address of shared data */

	struct cpupart *t_cpupart;	/* partition containing thread */
	int	t_bind_pset;	/* processor set binding */

	struct copyops *t_copyops;	/* copy in/out ops vector */

	caddr_t	t_stkbase;	/* base of the stack */
	struct page *t_red_pp;	/* if non-NULL, redzone is mapped */

	struct _afd t_activefd;	/* active file descriptor table */

	struct _kthread *t_priforw;	/* sleepq per-priority sublist */
	struct _kthread *t_priback;

	struct sleepq *t_sleepq;	/* sleep queue thread is waiting on */
	struct panic_trap_info *t_panic_trap;	/* saved data from fatal trap */
	int	*t_lgrp_affinity;	/* lgroup affinity */
	struct upimutex *t_upimutex;	/* list of upimutexes owned by thread */
	uint32_t t_nupinest;	/* number of nested held upi mutexes */
	struct kproject *t_proj;	/* project containing this thread */
	uint8_t	t_unpark;	/* modified holding t_delay_lock */
	uint8_t	t_release;	/* lwp_release() woke up the thread */
	uint8_t	t_hatdepth;	/* depth of recursive hat_memloads */
	uint8_t	t_xpvcntr;	/* see xen_block_migrate() */
	kcondvar_t t_joincv;	/* cv used to wait for thread exit */
	void	*t_taskq;	/* for threads belonging to taskq */
	hrtime_t t_anttime;	/* most recent time anticipatory load */
				/* was added to an lgroup's load */
				/* on this thread's behalf */
	char	*t_pdmsg;	/* privilege debugging message */

	uint_t	t_predcache;	/* DTrace predicate cache */
	hrtime_t t_dtrace_vtime;	/* DTrace virtual time */
	hrtime_t t_dtrace_start;	/* DTrace slice start time */

	uint8_t	t_dtrace_stop;	/* indicates a DTrace-desired stop */
	uint8_t	t_dtrace_sig;	/* signal sent via DTrace's raise() */

	union __tdu {
		struct __tds {
			uint8_t	_t_dtrace_on;	/* hit a fasttrap tracepoint */
			uint8_t	_t_dtrace_step;	/* about to return to kernel */
			uint8_t	_t_dtrace_ret;	/* handling a return probe */
			uint8_t	_t_dtrace_ast;	/* saved ast flag */
#ifdef __amd64
			uint8_t	_t_dtrace_reg;	/* modified register */
#endif
		} _tds;
		ulong_t	_t_dtrace_ft;	/* bitwise or of these flags */
	} _tdu;
#define	t_dtrace_ft	_tdu._t_dtrace_ft
#define	t_dtrace_on	_tdu._tds._t_dtrace_on
#define	t_dtrace_step	_tdu._tds._t_dtrace_step
#define	t_dtrace_ret	_tdu._tds._t_dtrace_ret
#define	t_dtrace_ast	_tdu._tds._t_dtrace_ast
#ifdef __amd64
#define	t_dtrace_reg	_tdu._tds._t_dtrace_reg
#endif

	uintptr_t t_dtrace_pc;	/* DTrace saved pc from fasttrap */
	uintptr_t t_dtrace_npc;	/* DTrace next pc from fasttrap */
	uintptr_t t_dtrace_scrpc;	/* DTrace per-thread scratch location */
	uintptr_t t_dtrace_astpc;	/* DTrace return sequence location */
#ifdef __amd64
	uint64_t t_dtrace_regv;	/* DTrace saved reg from fasttrap */
#endif
	hrtime_t t_hrtime;	/* high-res last time on cpu */
	kmutex_t t_ctx_lock;	/* protects t_ctx in removectx() */
	struct waitq *t_waitq;	/* wait queue */
} kthread_t;

/*
 * Thread flag (t_flag) definitions.
 *	These flags must be changed only for the current thread,
 *	and not during preemption code, since the code being
 *	preempted could be modifying the flags.
 *
 *	For the most part these flags do not need locking.
 *	The following flags will only be changed while the thread_lock is held,
 *	to give assurance that they are consistent with t_state:
 *		T_WAKEABLE
 */
#define	T_INTR_THREAD	0x0001	/* thread is an interrupt thread */
#define	T_WAKEABLE	0x0002	/* thread is blocked, signals enabled */
#define	T_TOMASK	0x0004	/* use lwp_sigoldmask on return from signal */
#define	T_TALLOCSTK	0x0008	/* thread structure allocated from stk */
#define	T_FORKALL	0x0010	/* thread was cloned by forkall() */
#define	T_WOULDBLOCK	0x0020	/* for lockfs */
#define	T_DONTBLOCK	0x0040	/* for lockfs */
#define	T_DONTPEND	0x0080	/* for lockfs */
#define	T_SYS_PROF	0x0100	/* profiling on for duration of system call */
#define	T_WAITCVSEM	0x0200	/* waiting for a lwp_cv or lwp_sema on sleepq */
#define	T_WATCHPT	0x0400	/* thread undergoing a watchpoint emulation */
#define	T_PANIC		0x0800	/* thread initiated a system panic */
#define	T_DFLTSTK	0x1000	/* stack is default size */
#define	T_CAPTURING	0x2000	/* thread is in page capture logic */
#define	T_VFPARENT	0x4000	/* thread is vfork parent, must call vfwait */
#define	T_DONTDTRACE	0x8000	/* disable DTrace probes */

/*
 * Flags in t_proc_flag.
 *	These flags must be modified only when holding the p_lock
 *	for the associated process.
 */
#define	TP_DAEMON	0x0001	/* this is an LWP_DAEMON lwp */
#define	TP_HOLDLWP	0x0002	/* hold thread's lwp */
#define	TP_TWAIT	0x0004	/* wait to be freed by lwp_wait() */
#define	TP_LWPEXIT	0x0008	/* lwp has exited */
#define	TP_PRSTOP	0x0010	/* thread is being stopped via /proc */
#define	TP_CHKPT	0x0020	/* thread is being stopped via CPR checkpoint */
#define	TP_EXITLWP	0x0040	/* terminate this lwp */
#define	TP_PRVSTOP	0x0080	/* thread is virtually stopped via /proc */
#define	TP_MSACCT	0x0100	/* collect micro-state accounting information */
#define	TP_STOPPING	0x0200	/* thread is executing stop() */
#define	TP_WATCHPT	0x0400	/* process has watchpoints in effect */
#define	TP_PAUSE	0x0800	/* process is being stopped via pauselwps() */
#define	TP_CHANGEBIND	0x1000	/* thread has a new cpu/cpupart binding */
#define	TP_ZTHREAD	0x2000	/* this is a kernel thread for a zone */
#define	TP_WATCHSTOP	0x4000	/* thread is stopping via holdwatch() */
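/*
 * Illustrative sketch (not part of the original header): per the rule
 * above, a TP_* bit is only changed with the owning process's p_lock
 * held; the surrounding code is hypothetical.
 *
 *	mutex_enter(&p->p_lock);
 *	t->t_proc_flag |= TP_HOLDLWP;
 *	mutex_exit(&p->p_lock);
 */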
/*
 * Thread scheduler flag (t_schedflag) definitions.
 *	The thread must be locked via thread_lock() or equiv. to change these.
 */
#define	TS_LOAD		0x0001	/* thread is in memory */
#define	TS_DONT_SWAP	0x0002	/* thread/lwp should not be swapped */
#define	TS_SWAPENQ	0x0004	/* swap thread when it reaches a safe point */
#define	TS_ON_SWAPQ	0x0008	/* thread is on the swap queue */
#define	TS_SIGNALLED	0x0010	/* thread was awakened by cv_signal() */
#define	TS_PROJWAITQ	0x0020	/* thread is on its project's waitq */
#define	TS_ZONEWAITQ	0x0040	/* thread is on its zone's waitq */
#define	TS_CSTART	0x0100	/* setrun() by continuelwps() */
#define	TS_UNPAUSE	0x0200	/* setrun() by unpauselwps() */
#define	TS_XSTART	0x0400	/* setrun() by SIGCONT */
#define	TS_PSTART	0x0800	/* setrun() by /proc */
#define	TS_RESUME	0x1000	/* setrun() by CPR resume process */
#define	TS_CREATE	0x2000	/* setrun() by syslwp_create() */
#define	TS_RUNQMATCH	0x4000	/* exact run queue balancing by setbackdq() */
#define	TS_ALLSTART	\
	(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
#define	TS_ANYWAITQ	(TS_PROJWAITQ|TS_ZONEWAITQ)

/*
 * No locking needed for AST field.
 */
#define	aston(t)	((t)->t_astflag = 1)
#define	astoff(t)	((t)->t_astflag = 0)

/* True if thread is stopped on an event of interest */
#define	ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
			!((t)->t_schedflag & TS_PSTART))

/* True if thread is asleep and wakeable */
#define	ISWAKEABLE(t) (((t)->t_state == TS_SLEEP && \
			((t)->t_flag & T_WAKEABLE)))

/* True if thread is on the wait queue */
#define	ISWAITING(t) ((t)->t_state == TS_WAIT)

/* similar to ISTOPPED except the event of interest is CPR */
#define	CPR_ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
			!((t)->t_schedflag & TS_RESUME))

/*
 * True if thread is virtually stopped (is or was asleep in
 * one of the lwp_*() system calls and marked to stop by /proc.)
 */
#define	VSTOPPED(t)	((t)->t_proc_flag & TP_PRVSTOP)

/* similar to VSTOPPED except the point of interest is CPR */
#define	CPR_VSTOPPED(t)				\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan0 != NULL &&		\
	((t)->t_flag & T_WAKEABLE) &&		\
	((t)->t_proc_flag & TP_CHKPT))

/* True if thread has been stopped by hold*() or was created stopped */
#define	SUSPENDED(t) ((t)->t_state == TS_STOPPED && \
	((t)->t_schedflag & (TS_CSTART|TS_UNPAUSE)) != (TS_CSTART|TS_UNPAUSE))

/* True if thread possesses an inherited priority */
#define	INHERITED(t)	((t)->t_epri != 0)

/* The dispatch priority of a thread */
#define	DISP_PRIO(t) ((t)->t_epri > (t)->t_pri ? (t)->t_epri : (t)->t_pri)

/* The assigned priority of a thread */
#define	ASSIGNED_PRIO(t)	((t)->t_pri)

/*
 * Macros to determine whether a thread can be swapped.
 * If t_lock is held, the thread is either on a processor or being swapped.
 */
#define	SWAP_OK(t)	(!LOCK_HELD(&(t)->t_lock))
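/*
 * Illustrative sketch (not part of the original header): the predicates
 * above read t_state together with t_schedflag/t_flag/t_proc_flag, so a
 * caller normally evaluates them with the thread locked; the body of the
 * if-statement below is hypothetical.
 *
 *	thread_lock(t);
 *	if (ISTOPPED(t) || SUSPENDED(t))
 *		... the thread will not run until explicitly restarted ...
 *	thread_unlock(t);
 */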
/*
 * proctot(x)
 *	convert a proc pointer to a thread pointer. this only works with
 *	procs that have only one lwp.
 *
 * proctolwp(x)
 *	convert a proc pointer to a lwp pointer. this only works with
 *	procs that have only one lwp.
 *
 * ttolwp(x)
 *	convert a thread pointer to its lwp pointer.
 *
 * ttoproc(x)
 *	convert a thread pointer to its proc pointer.
 *
 * ttoproj(x)
 *	convert a thread pointer to its project pointer.
 *
 * ttozone(x)
 *	convert a thread pointer to its zone pointer.
 *
 * lwptot(x)
 *	convert a lwp pointer to its thread pointer.
 *
 * lwptoproc(x)
 *	convert a lwp to its proc pointer.
 */
#define	proctot(x)	((x)->p_tlist)
#define	proctolwp(x)	((x)->p_tlist->t_lwp)
#define	ttolwp(x)	((x)->t_lwp)
#define	ttoproc(x)	((x)->t_procp)
#define	ttoproj(x)	((x)->t_proj)
#define	ttozone(x)	((x)->t_procp->p_zone)
#define	lwptot(x)	((x)->lwp_thread)
#define	lwptoproc(x)	((x)->lwp_procp)

#define	t_pc	t_pcb.val[0]
#define	t_sp	t_pcb.val[1]

#ifdef	_KERNEL

extern	kthread_t *threadp(void);	/* inline, returns thread pointer */
#define	curthread	(threadp())		/* current thread pointer */
#define	curproc		(ttoproc(curthread))	/* current process pointer */
#define	curproj		(ttoproj(curthread))	/* current project pointer */
#define	curzone		(curproc->p_zone)	/* current zone pointer */

extern	struct _kthread	t0;	/* the scheduler thread */
extern	kmutex_t pidlock;	/* global process lock */

/*
 * thread_free_lock is used by the tick accounting thread to keep a thread
 * from being freed while it is being examined.
 */
#define	THREAD_FREE_NUM		1024
#define	THREAD_FREE_MASK	(THREAD_FREE_NUM - 1)
#define	THREAD_FREE_SHIFT_BITS	5
#define	THREAD_FREE_SHIFT(t)	((uintptr_t)t >> THREAD_FREE_SHIFT_BITS)
#define	THREAD_FREE_HASH(t)	(THREAD_FREE_SHIFT(t) & THREAD_FREE_MASK)

typedef struct thread_free_lock {
	kmutex_t	tf_lock;
	uchar_t		tf_pad[64 - sizeof (kmutex_t)];
} thread_free_lock_t;

extern void	thread_free_prevent(kthread_t *);
extern void	thread_free_allow(kthread_t *);
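/*
 * Illustrative sketch (not part of the original header): per the comment
 * above, the tick accounting path brackets its examination of another
 * thread with this pair so the thread cannot be freed underneath it; the
 * elided middle is hypothetical.
 *
 *	thread_free_prevent(t);
 *	... sample t's usage fields ...
 *	thread_free_allow(t);
 */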
/*
 * Routines to change the priority and effective priority
 * of a thread-locked thread, whatever its state.
 */
extern int	thread_change_pri(kthread_t *t, pri_t disp_pri, int front);
extern void	thread_change_epri(kthread_t *t, pri_t disp_pri);

/*
 * Routines that manipulate the dispatcher lock for the thread.
 * The locking hierarchy is as follows:
 *	cpu_lock > sleepq locks > run queue locks
 */
void	thread_transition(kthread_t *);	/* move to transition lock */
void	thread_stop(kthread_t *);	/* move to stop lock */
void	thread_lock(kthread_t *);	/* lock thread and its queue */
void	thread_lock_high(kthread_t *);	/* lock thread and its queue */
void	thread_onproc(kthread_t *, struct cpu *);	/* set onproc state lock */

#define	thread_unlock(t)		disp_lock_exit((t)->t_lockp)
#define	thread_unlock_high(t)		disp_lock_exit_high((t)->t_lockp)
#define	thread_unlock_nopreempt(t)	disp_lock_exit_nopreempt((t)->t_lockp)

#define	THREAD_LOCK_HELD(t)	(DISP_LOCK_HELD((t)->t_lockp))

extern disp_lock_t transition_lock;	/* lock protecting transiting threads */
extern disp_lock_t stop_lock;		/* lock protecting stopped threads */

caddr_t	thread_stk_init(caddr_t);	/* init thread stack */

#endif	/* _KERNEL */

/*
 * Macros to indicate that the thread holds resources that could be critical
 * to other kernel threads, so this thread needs to have kernel priority
 * if it blocks or is preempted.  Note that this is not necessary if the
 * resource is a mutex or a writer lock because of priority inheritance.
 *
 * The only way one thread may legally manipulate another thread's t_kpri_req
 * is to hold the target thread's thread lock while that thread is asleep.
 * (The rwlock code does this to implement direct handoff to waiting readers.)
 */
#define	THREAD_KPRI_REQUEST()	(curthread->t_kpri_req++)
#define	THREAD_KPRI_RELEASE()	(curthread->t_kpri_req--)
#define	THREAD_KPRI_RELEASE_N(n) (curthread->t_kpri_req -= (n))

/*
 * Macro to change a thread's priority.
 */
#define	THREAD_CHANGE_PRI(t, pri) {					\
	pri_t __new_pri = (pri);					\
	DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, __new_pri);	\
	(t)->t_pri = __new_pri;						\
	schedctl_set_cidpri(t);						\
}

/*
 * Macro to indicate that a thread's priority is about to be changed.
 */
#define	THREAD_WILLCHANGE_PRI(t, pri) {					\
	DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, (pri));	\
}

/*
 * Macros to change thread state and the associated lock.
 */
#define	THREAD_SET_STATE(tp, state, lp)		\
		((tp)->t_state = state, (tp)->t_lockp = lp)

/*
 * Point it at the transition lock, which is always held.
 * The previously held lock is dropped.
 */
#define	THREAD_TRANSITION(tp)	thread_transition(tp);
/*
 * Set the thread's lock to be the transition lock, without dropping
 * the previously held lock.
 */
#define	THREAD_TRANSITION_NOLOCK(tp)	((tp)->t_lockp = &transition_lock)

/*
 * Put thread in run state, and set the lock pointer to the dispatcher queue
 * lock pointer provided.  This lock should be held.
 */
#define	THREAD_RUN(tp, lp)	THREAD_SET_STATE(tp, TS_RUN, lp)

/*
 * Put thread in wait state, and set the lock pointer to the wait queue
 * lock pointer provided.  This lock should be held.
 */
#define	THREAD_WAIT(tp, lp)	THREAD_SET_STATE(tp, TS_WAIT, lp)

/*
 * Put thread in run state, and set the lock pointer to the dispatcher queue
 * lock pointer provided (i.e., the "swapped_lock").  This lock should be held.
 */
#define	THREAD_SWAP(tp, lp)	THREAD_SET_STATE(tp, TS_RUN, lp)

/*
 * Put the thread in zombie state and set the lock pointer to NULL.
 * The NULL will catch anything that tries to lock a zombie.
 */
#define	THREAD_ZOMB(tp)		THREAD_SET_STATE(tp, TS_ZOMB, NULL)

/*
 * Set the thread into ONPROC state, and point the lock at the CPU's
 * lock for the onproc thread(s).  This lock should be held, so the
 * thread does not become unlocked, since these stores can be reordered.
 */
#define	THREAD_ONPROC(tp, cpu)	\
		THREAD_SET_STATE(tp, TS_ONPROC, &(cpu)->cpu_thread_lock)

/*
 * Set the thread into the TS_SLEEP state, and set the lock pointer to
 * some sleep queue's lock.  The new lock should already be held.
 */
#define	THREAD_SLEEP(tp, lp)	{				\
	disp_lock_t	*tlp;					\
	tlp = (tp)->t_lockp;					\
	THREAD_SET_STATE(tp, TS_SLEEP, lp);			\
	disp_lock_exit_high(tlp);				\
}

/*
 * Interrupt threads are created in TS_FREE state, and their lock
 * points at the associated CPU's lock.
 */
#define	THREAD_FREEINTR(tp, cpu)	\
		THREAD_SET_STATE(tp, TS_FREE, &(cpu)->cpu_thread_lock)

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_THREAD_H */