/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_THREAD_H
#define	_SYS_THREAD_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/klwp.h>
#include <sys/time.h>
#include <sys/signal.h>
#include <sys/kcpc.h>
#if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL)
#include <asm/thread.h>
#endif

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * The thread object, its states, and the methods by which it
 * is accessed.
 */

/*
 * Values that t_state may assume.  Note that t_state cannot have more
 * than one of these flags set at a time.
 */
#define	TS_FREE		0x00	/* Thread at loose ends */
#define	TS_SLEEP	0x01	/* Awaiting an event */
#define	TS_RUN		0x02	/* Runnable, but not yet on a processor */
#define	TS_ONPROC	0x04	/* Thread is being run on a processor */
#define	TS_ZOMB		0x08	/* Thread has died but hasn't been reaped */
#define	TS_STOPPED	0x10	/* Stopped, initial state */

typedef struct ctxop {
	void	(*save_op)(void *);	/* function to invoke to save context */
	void	(*restore_op)(void *);	/* function to invoke to restore ctx */
	void	(*fork_op)(void *, void *);	/* invoke to fork context */
	void	(*lwp_create_op)(void *, void *);	/* lwp_create context */
	void	(*exit_op)(void *);	/* invoked during {thread,lwp}_exit() */
	void	(*free_op)(void *, int);	/* function which frees the context */
	void	*arg;		/* argument to above functions, ctx pointer */
	struct ctxop *next;	/* next context ops */
} ctxop_t;

/*
 * The active file descriptor table.
 * Each member of a_fd[] that is not equal to -1 represents an active fd.
 * The structure is initialized on first use; all zeros means uninitialized.
 */
typedef struct _afd {
	int	*a_fd;		/* pointer to list of fds */
	short	a_nfd;		/* number of entries in *a_fd */
	short	a_stale;	/* one of the active fds is being closed */
	int	a_buf[1];	/* buffer to which a_fd initially refers */
} afd_t;
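/*
 * Illustrative sketch (not part of the original header): a subsystem
 * typically attaches per-thread context by chaining a ctxop_t onto
 * t_ctx so its save/restore hooks run around context switches.  This
 * assumes the installctx() interface provided by the dispatcher code;
 * the hook names below (xx_save, xx_restore, xx_free, xx_arg) are
 * hypothetical.
 *
 *	static void xx_save(void *arg);
 *	static void xx_restore(void *arg);
 *	static void xx_free(void *arg, int isexec);
 *
 *	installctx(curthread, xx_arg, xx_save, xx_restore,
 *	    NULL, NULL, NULL, xx_free);
 */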
/*
 * An lwpchan provides uniqueness when sleeping on user-level
 * synchronization primitives.  The lc_wchan member is used
 * for sleeping on kernel synchronization primitives.
 */
typedef struct {
	caddr_t	lc_wchan0;
	caddr_t	lc_wchan;
} lwpchan_t;

typedef struct _kthread	*kthread_id_t;

struct turnstile;
struct trap_info;
struct upimutex;
struct kproject;
struct on_trap_data;

/* Definition for kernel thread identifier type */
typedef uint64_t kt_did_t;

typedef struct _kthread {
	struct _kthread	*t_link;	/* dispq, sleepq, and free queue link */

	caddr_t	t_stk;		/* base of stack (kernel sp value to use) */
	void	(*t_startpc)(void);	/* PC where thread started */
	struct cpu	*t_bound_cpu;	/* cpu bound to, or NULL if not bound */
	short	t_affinitycnt;	/* nesting level of kernel affinity-setting */
	short	t_bind_cpu;	/* user-specified CPU binding (-1 if none) */
	ushort_t t_flag;	/* modified only by current thread */
	ushort_t t_proc_flag;	/* modified holding ttoproc(t)->p_lock */
	ushort_t t_schedflag;	/* modified holding thread_lock(t) */
	volatile char t_preempt;	/* don't preempt thread if set */
	volatile char t_preempt_lk;
	uint_t	t_state;	/* thread state (protected by thread_lock) */
	pri_t	t_pri;		/* assigned thread priority */
	pri_t	t_epri;		/* inherited thread priority */
	char	t_writer;	/* sleeping in lwp_rwlock_lock(RW_WRITE_LOCK) */
	label_t	t_pcb;		/* pcb, save area when switching */
	lwpchan_t t_lwpchan;	/* reason for blocking */
#define	t_wchan0	t_lwpchan.lc_wchan0
#define	t_wchan		t_lwpchan.lc_wchan
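	/*
	 * Illustrative sketch (not part of the original header): t_state
	 * and t_wchan must be examined with the thread locked, since
	 * t_lockp can change as the thread moves between queues.  A
	 * hypothetical caller would do:
	 *
	 *	thread_lock(t);
	 *	if (t->t_state == TS_SLEEP && t->t_wchan != NULL)
	 *		... thread is blocked on a kernel object ...
	 *	thread_unlock(t);
	 */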
	struct _sobj_ops *t_sobj_ops;
	id_t	t_cid;		/* scheduling class id */
	struct thread_ops *t_clfuncs;	/* scheduling class ops vector */
	void	*t_cldata;	/* per scheduling class specific data */
	ctxop_t	*t_ctx;		/* thread context */
	uintptr_t t_lofault;	/* ret pc for failed page faults */
	label_t	*t_onfault;	/* on_fault() setjmp buf */
	struct on_trap_data *t_ontrap;	/* on_trap() protection data */
	caddr_t	t_swap;		/* swappable thread storage */
	lock_t	t_lock;		/* used to resume() a thread */
	uint8_t	t_lockstat;	/* set while thread is in lockstat code */
	uint8_t	t_pil;		/* interrupt thread PIL */
	disp_lock_t	t_pi_lock;	/* lock protecting t_prioinv list */
	char	t_nomigrate;	/* do not migrate if set */
	struct cpu	*t_cpu;	/* CPU that thread last ran on */
	struct cpu	*t_weakbound_cpu;	/* cpu weakly bound to */
	struct lgrp_ld	*t_lpl;	/* load average for home lgroup */
	void		*t_lgrp_reserv[2];	/* reserved for future */
	struct _kthread	*t_intr;	/* interrupted (pinned) thread */
	uint64_t	t_intr_start;	/* timestamp when time slice began */
	kt_did_t	t_did;	/* thread id for kernel debuggers */
	caddr_t	t_tnf_tpdp;	/* Trace facility data pointer */
	kcpc_ctx_t	*t_cpc_ctx;	/* performance counter context */
	kcpc_set_t	*t_cpc_set;	/* set this thread has bound */

	/*
	 * non swappable part of the lwp state.
	 */
	id_t		t_tid;		/* lwp's id */
	id_t		t_waitfor;	/* target lwp id in lwp_wait() */
	struct sigqueue	*t_sigqueue;	/* queue of siginfo structs */
	k_sigset_t	t_sig;		/* signals pending to this process */
	k_sigset_t	t_extsig;	/* signals sent from another contract */
	k_sigset_t	t_hold;		/* hold signal bit mask */
	struct	_kthread *t_forw;	/* process's forward thread link */
	struct	_kthread *t_back;	/* process's backward thread link */
	struct	_kthread *t_thlink;	/* tid (lwpid) lookup hash link */
	klwp_t	*t_lwp;			/* thread's lwp pointer */
	struct	proc	*t_procp;	/* proc pointer */
	struct	t_audit_data *t_audit_data;	/* per thread audit data */
	struct	_kthread *t_next;	/* doubly linked list of all threads */
	struct	_kthread *t_prev;
	ushort_t t_whystop;		/* reason for stopping */
	ushort_t t_whatstop;		/* more detailed reason */
	int	t_dslot;		/* index in proc's thread directory */
	struct pollstate *t_pollstate;	/* state used during poll(2) */
	struct pollcache *t_pollcache;	/* to pass a pcache ptr by /dev/poll */
	struct cred	*t_cred;	/* pointer to current cred */
	time_t	t_start;		/* start time, seconds since epoch */
	clock_t	t_lbolt;		/* lbolt at last clock_tick() */
	hrtime_t t_stoptime;		/* timestamp at stop() */
	uint_t	t_pctcpu;		/* %cpu at last clock_tick(), binary */
					/* point at right of high-order bit */
	short	t_sysnum;		/* system call number */
	kcondvar_t	t_delay_cv;
	kmutex_t	t_delay_lock;

	/*
	 * Pointer to the dispatcher lock protecting t_state and state-related
	 * flags.  This pointer can change during waits on the lock, so
	 * it should be grabbed only by thread_lock().
	 */
	disp_lock_t	*t_lockp;	/* pointer to the dispatcher lock */
	ushort_t	t_oldspl;	/* spl level before dispatcher locked */
	volatile char	t_pre_sys;	/* pre-syscall work needed */
	lock_t		t_lock_flush;	/* for lock_mutex_flush() impl */
	struct _disp	*t_disp_queue;	/* run queue for chosen CPU */
	clock_t		t_disp_time;	/* last time this thread was running */
	uint_t		t_kpri_req;	/* kernel priority required */

	/*
	 * Post-syscall / post-trap flags.
	 *	No lock is required to set these.
	 *	These must be cleared only by the thread itself.
	 *
	 *	t_astflag indicates that some post-trap processing is required,
	 *		possibly a signal or a preemption.  The thread will not
	 *		return to user with this set.
	 *	t_post_sys indicates that some unusual post-system call
	 *		handling is required, such as an error or tracing.
	 *	t_sig_check indicates that some condition in ISSIG() must be
	 *		checked, but doesn't prevent returning to user.
	 *	t_post_sys_ast is a way of checking whether any of these three
	 *		flags are set.
	 */
	union __tu {
		struct __ts {
			volatile char	_t_astflag;	/* AST requested */
			volatile char	_t_sig_check;	/* ISSIG required */
			volatile char	_t_post_sys;	/* post_syscall req */
			volatile char	_t_trapret;	/* call CL_TRAPRET */
		} _ts;
		volatile int	_t_post_sys_ast;	/* OR of these flags */
	} _tu;
#define	t_astflag	_tu._ts._t_astflag
#define	t_sig_check	_tu._ts._t_sig_check
#define	t_post_sys	_tu._ts._t_post_sys
#define	t_trapret	_tu._ts._t_trapret
#define	t_post_sys_ast	_tu._t_post_sys_ast
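	/*
	 * Illustrative sketch (not part of the original header): the union
	 * lets the return-to-user fast path test all four byte-sized flags
	 * with a single load, as in this hypothetical check:
	 *
	 *	if (curthread->t_post_sys_ast == 0) {
	 *		... fast return to user ...
	 *	} else {
	 *		if (curthread->t_astflag) ... handle the AST ...
	 *		if (curthread->t_sig_check) ... check signals ...
	 *	}
	 */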
	/*
	 * Real time microstate profiling.
	 */
					/* possible 4-byte filler */
	hrtime_t t_waitrq;		/* timestamp for run queue wait time */
	int	t_mstate;		/* current microstate */
	struct rprof {
		int	rp_anystate;		/* set if any state non-zero */
		uint_t	rp_state[NMSTATES];	/* mstate profiling counts */
	} *t_rprof;

	/*
	 * There is a turnstile inserted into the list below for
	 * every priority inverted synchronization object that
	 * this thread holds.
	 */

	struct turnstile *t_prioinv;

	/*
	 * Pointer to the turnstile attached to the synchronization
	 * object where this thread is blocked.
	 */

	struct turnstile *t_ts;

	/*
	 * kernel thread specific data
	 *	Borrowed from userland implementation of POSIX tsd
	 */
	struct tsd_thread {
		struct tsd_thread *ts_next;	/* threads with TSD */
		struct tsd_thread *ts_prev;	/* threads with TSD */
		uint_t	ts_nkeys;		/* entries in value array */
		void	**ts_value;		/* array of value/key */
	} *t_tsd;
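	/*
	 * Illustrative sketch (not part of the original header): kernel
	 * TSD is keyed like its userland POSIX counterpart.  Assuming the
	 * tsd_create()/tsd_set()/tsd_get() interfaces from the TSD code
	 * (the key variable and destructor below are hypothetical):
	 *
	 *	static uint_t xx_tsd_key;
	 *
	 *	tsd_create(&xx_tsd_key, xx_destructor);
	 *	(void) tsd_set(xx_tsd_key, data);	 current thread
	 *	data = tsd_get(xx_tsd_key);		 current thread
	 */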
	clock_t		t_stime;	/* time stamp used by the swapper */
	struct door_data *t_door;	/* door invocation data */
	kmutex_t	*t_plockp;	/* pointer to process's p_lock */

	struct sc_shared *t_schedctl;	/* scheduler activations shared data */
	uintptr_t	t_sc_uaddr;	/* user-level address of shared data */

	struct cpupart	*t_cpupart;	/* partition containing thread */
	int		t_bind_pset;	/* processor set binding */

	struct copyops	*t_copyops;	/* copy in/out ops vector */

	caddr_t		t_stkbase;	/* base of the stack */
	struct page	*t_red_pp;	/* if non-NULL, redzone is mapped */

	struct _afd	t_activefd;	/* active file descriptor table */

	struct _kthread	*t_priforw;	/* sleepq per-priority sublist */
	struct _kthread	*t_priback;

	struct sleepq	*t_sleepq;	/* sleep queue thread is waiting on */
	struct trap_info *t_panic_trap;	/* saved data from fatal trap */
	int		*t_lgrp_affinity;	/* lgroup affinity */
	struct upimutex	*t_upimutex;	/* list of upimutexes owned by thread */
	uint32_t	t_nupinest;	/* number of nested held upi mutexes */
	struct kproject	*t_proj;	/* project containing this thread */
	uint8_t		t_unpark;	/* modified holding t_delay_lock */
	uint8_t		t_release;	/* lwp_release() woke up the thread */
	uint8_t		t_hatdepth;	/* depth of recursive hat_memloads */
	kcondvar_t	t_joincv;	/* cv used to wait for thread exit */
	void		*t_taskq;	/* for threads belonging to taskq */
	hrtime_t	t_anttime;	/* most recent time anticipatory load */
					/* was added to an lgroup's load */
					/* on this thread's behalf */
	char		*t_pdmsg;	/* privilege debugging message */

	uint_t		t_predcache;	/* DTrace predicate cache */
	hrtime_t	t_dtrace_vtime;	/* DTrace virtual time */
	hrtime_t	t_dtrace_start;	/* DTrace slice start time */

	uint8_t		t_dtrace_stop;	/* indicates a DTrace-desired stop */
	uint8_t		t_dtrace_sig;	/* signal sent via DTrace's raise() */

	union __tdu {
		struct __tds {
			uint8_t	_t_dtrace_on;	/* hit a fasttrap tracepoint */
			uint8_t	_t_dtrace_step;	/* about to return to kernel */
			uint8_t	_t_dtrace_ret;	/* handling a return probe */
			uint8_t	_t_dtrace_ast;	/* saved ast flag */
#ifdef __amd64
			uint8_t	_t_dtrace_reg;	/* modified register */
#endif
		} _tds;
		ulong_t	_t_dtrace_ft;	/* bitwise or of these flags */
	} _tdu;
#define	t_dtrace_ft	_tdu._t_dtrace_ft
#define	t_dtrace_on	_tdu._tds._t_dtrace_on
#define	t_dtrace_step	_tdu._tds._t_dtrace_step
#define	t_dtrace_ret	_tdu._tds._t_dtrace_ret
#define	t_dtrace_ast	_tdu._tds._t_dtrace_ast
#ifdef __amd64
#define	t_dtrace_reg	_tdu._tds._t_dtrace_reg
#endif

	uintptr_t	t_dtrace_pc;	/* DTrace saved pc from fasttrap */
	uintptr_t	t_dtrace_npc;	/* DTrace next pc from fasttrap */
	uintptr_t	t_dtrace_scrpc;	/* DTrace per-thread scratch location */
	uintptr_t	t_dtrace_astpc;	/* DTrace return sequence location */
#ifdef __amd64
	uint64_t	t_dtrace_regv;	/* DTrace saved reg from fasttrap */
#endif
	hrtime_t	t_hrtime;	/* high-res last time on cpu */
	kmutex_t	t_ctx_lock;	/* protects t_ctx in removectx() */
} kthread_t;

/*
 * Thread flag (t_flag) definitions.
 *	These flags must be changed only for the current thread,
 *	and not during preemption code, since the code being
 *	preempted could be modifying the flags.
 *
 *	For the most part these flags do not need locking.
 *	The following flags will only be changed while the thread_lock is held,
 *	to give assurance that they are consistent with t_state:
 *		T_WAKEABLE
 */
#define	T_INTR_THREAD	0x0001	/* thread is an interrupt thread */
#define	T_WAKEABLE	0x0002	/* thread is blocked, signals enabled */
#define	T_TOMASK	0x0004	/* use lwp_sigoldmask on return from signal */
#define	T_TALLOCSTK	0x0008	/* thread structure allocated from stk */
#define	T_FORKALL	0x0010	/* thread was cloned by forkall() */
#define	T_WOULDBLOCK	0x0020	/* for lockfs */
#define	T_DONTBLOCK	0x0040	/* for lockfs */
#define	T_DONTPEND	0x0080	/* for lockfs */
#define	T_SYS_PROF	0x0100	/* profiling on for duration of system call */
#define	T_WAITCVSEM	0x0200	/* waiting for a lwp_cv or lwp_sema on sleepq */
#define	T_WATCHPT	0x0400	/* thread undergoing a watchpoint emulation */
#define	T_PANIC		0x0800	/* thread initiated a system panic */
#define	T_DFLTSTK	0x1000	/* stack is default size */
#define	T_CAPTURING	0x2000	/* thread is in page capture logic */

/*
 * Flags in t_proc_flag.
 *	These flags must be modified only when holding the p_lock
 *	for the associated process.
 */
#define	TP_DAEMON	0x0001	/* this is an LWP_DAEMON lwp */
#define	TP_HOLDLWP	0x0002	/* hold thread's lwp */
#define	TP_TWAIT	0x0004	/* wait to be freed by lwp_wait() */
#define	TP_LWPEXIT	0x0008	/* lwp has exited */
#define	TP_PRSTOP	0x0010	/* thread is being stopped via /proc */
#define	TP_CHKPT	0x0020	/* thread is being stopped via CPR checkpoint */
#define	TP_EXITLWP	0x0040	/* terminate this lwp */
#define	TP_PRVSTOP	0x0080	/* thread is virtually stopped via /proc */
#define	TP_MSACCT	0x0100	/* collect micro-state accounting information */
#define	TP_STOPPING	0x0200	/* thread is executing stop() */
#define	TP_WATCHPT	0x0400	/* process has watchpoints in effect */
#define	TP_PAUSE	0x0800	/* process is being stopped via pauselwps() */
#define	TP_CHANGEBIND	0x1000	/* thread has a new cpu/cpupart binding */
#define	TP_ZTHREAD	0x2000	/* this is a kernel thread for a zone */
#define	TP_WATCHSTOP	0x4000	/* thread is stopping via holdwatch() */
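/*
 * Illustrative sketch (not part of the original header): t_proc_flag
 * updates are made with the owning process's p_lock held, e.g. this
 * hypothetical request to hold an lwp:
 *
 *	mutex_enter(&ttoproc(t)->p_lock);
 *	t->t_proc_flag |= TP_HOLDLWP;
 *	mutex_exit(&ttoproc(t)->p_lock);
 */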
/*
 * Thread scheduler flag (t_schedflag) definitions.
 *	The thread must be locked via thread_lock() or equiv. to change these.
 */
#define	TS_LOAD		0x0001	/* thread is in memory */
#define	TS_DONT_SWAP	0x0002	/* thread/lwp should not be swapped */
#define	TS_SWAPENQ	0x0004	/* swap thread when it reaches a safe point */
#define	TS_ON_SWAPQ	0x0008	/* thread is on the swap queue */
#define	TS_SIGNALLED	0x0010	/* thread was awakened by cv_signal() */
#define	TS_CSTART	0x0100	/* setrun() by continuelwps() */
#define	TS_UNPAUSE	0x0200	/* setrun() by unpauselwps() */
#define	TS_XSTART	0x0400	/* setrun() by SIGCONT */
#define	TS_PSTART	0x0800	/* setrun() by /proc */
#define	TS_RESUME	0x1000	/* setrun() by CPR resume process */
#define	TS_CREATE	0x2000	/* setrun() by syslwp_create() */
#define	TS_RUNQMATCH	0x4000	/* exact run queue balancing by setbackdq() */
#define	TS_ALLSTART	\
	(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)

/*
 * No locking needed for AST field.
 */
#define	aston(t)	((t)->t_astflag = 1)
#define	astoff(t)	((t)->t_astflag = 0)

/* True if thread is stopped on an event of interest */
#define	ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
			!((t)->t_schedflag & TS_PSTART))

/* similar to ISTOPPED except the event of interest is CPR */
#define	CPR_ISTOPPED(t) ((t)->t_state == TS_STOPPED && \
			!((t)->t_schedflag & TS_RESUME))

/*
 * True if thread is virtually stopped (is or was asleep in
 * one of the lwp_*() system calls and marked to stop by /proc.)
 */
#define	VSTOPPED(t)	((t)->t_proc_flag & TP_PRVSTOP)

/* similar to VSTOPPED except the point of interest is CPR */
#define	CPR_VSTOPPED(t)				\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan0 != NULL &&		\
	((t)->t_flag & T_WAKEABLE) &&		\
	((t)->t_proc_flag & TP_CHKPT))

/* True if thread has been stopped by hold*() or was created stopped */
#define	SUSPENDED(t) ((t)->t_state == TS_STOPPED && \
	((t)->t_schedflag & (TS_CSTART|TS_UNPAUSE)) != (TS_CSTART|TS_UNPAUSE))

/* True if thread possesses an inherited priority */
#define	INHERITED(t)	((t)->t_epri != 0)

/* The dispatch priority of a thread */
#define	DISP_PRIO(t) ((t)->t_epri > (t)->t_pri ? (t)->t_epri : (t)->t_pri)

/* The assigned priority of a thread */
#define	ASSIGNED_PRIO(t)	((t)->t_pri)

/*
 * Macros to determine whether a thread can be swapped.
 * If t_lock is held, the thread is either on a processor or being swapped.
 */
#define	SWAP_OK(t)	(!LOCK_HELD(&(t)->t_lock))

/*
 * proctot(x)
 *	convert a proc pointer to a thread pointer.  this only works with
 *	procs that have only one lwp.
 *
 * proctolwp(x)
 *	convert a proc pointer to a lwp pointer.  this only works with
 *	procs that have only one lwp.
 *
 * ttolwp(x)
 *	convert a thread pointer to its lwp pointer.
 *
 * ttoproc(x)
 *	convert a thread pointer to its proc pointer.
 *
 * ttoproj(x)
 *	convert a thread pointer to its project pointer.
 *
 * lwptot(x)
 *	convert a lwp pointer to its thread pointer.
 *
 * lwptoproc(x)
 *	convert a lwp to its proc pointer.
 */
#define	proctot(x)	((x)->p_tlist)
#define	proctolwp(x)	((x)->p_tlist->t_lwp)
#define	ttolwp(x)	((x)->t_lwp)
#define	ttoproc(x)	((x)->t_procp)
#define	ttoproj(x)	((x)->t_proj)
#define	lwptot(x)	((x)->lwp_thread)
#define	lwptoproc(x)	((x)->lwp_procp)

#define	t_pc	t_pcb.val[0]
#define	t_sp	t_pcb.val[1]
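/*
 * Illustrative sketch (not part of the original header): the conversion
 * macros compose naturally, e.g. for the current thread:
 *
 *	struct proc *p = ttoproc(curthread);
 *	klwp_t *lwp = ttolwp(curthread);	 NULL for pure kernel threads
 *	ASSERT(lwp == NULL || lwptoproc(lwp) == p);
 */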
#ifdef	_KERNEL

extern	kthread_t	*threadp(void);	/* inline, returns thread pointer */
#define	curthread	(threadp())		/* current thread pointer */
#define	curproc		(ttoproc(curthread))	/* current process pointer */
#define	curproj		(ttoproj(curthread))	/* current project pointer */

extern	struct _kthread	t0;		/* the scheduler thread */
extern	kmutex_t	pidlock;	/* global process lock */

/*
 * thread_free_lock is used by the clock thread to keep a thread
 * from being freed while it is being examined.
 */
extern	kmutex_t	thread_free_lock;

/*
 * Routines to change the priority and effective priority
 * of a thread-locked thread, whatever its state.
 */
extern	int	thread_change_pri(kthread_t *t, pri_t disp_pri, int front);
extern	void	thread_change_epri(kthread_t *t, pri_t disp_pri);

/*
 * Routines that manipulate the dispatcher lock for the thread.
 * The locking hierarchy is as follows:
 *	cpu_lock > sleepq locks > run queue locks
 */
void	thread_transition(kthread_t *);	/* move to transition lock */
void	thread_stop(kthread_t *);	/* move to stop lock */
void	thread_lock(kthread_t *);	/* lock thread and its queue */
void	thread_lock_high(kthread_t *);	/* lock thread and its queue */
void	thread_onproc(kthread_t *, struct cpu *);	/* set onproc state lock */

#define	thread_unlock(t)		disp_lock_exit((t)->t_lockp)
#define	thread_unlock_high(t)		disp_lock_exit_high((t)->t_lockp)
#define	thread_unlock_nopreempt(t)	disp_lock_exit_nopreempt((t)->t_lockp)

#define	THREAD_LOCK_HELD(t)	(DISP_LOCK_HELD((t)->t_lockp))

extern disp_lock_t	transition_lock;	/* lock protecting transiting threads */
extern disp_lock_t	stop_lock;		/* lock protecting stopped threads */

caddr_t	thread_stk_init(caddr_t);	/* init thread stack */

#endif	/* _KERNEL */

/*
 * Macros to indicate that the thread holds resources that could be critical
 * to other kernel threads, so this thread needs to have kernel priority
 * if it blocks or is preempted.  Note that this is not necessary if the
 * resource is a mutex or a writer lock because of priority inheritance.
 *
 * The only way one thread may legally manipulate another thread's t_kpri_req
 * is to hold the target thread's thread lock while that thread is asleep.
 * (The rwlock code does this to implement direct handoff to waiting readers.)
 */
#define	THREAD_KPRI_REQUEST()	(curthread->t_kpri_req++)
#define	THREAD_KPRI_RELEASE()	(curthread->t_kpri_req--)
#define	THREAD_KPRI_RELEASE_N(n) (curthread->t_kpri_req -= (n))

/*
 * Macro to change a thread's priority.
 */
#define	THREAD_CHANGE_PRI(t, pri) {	\
	pri_t __new_pri = (pri);	\
	DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, __new_pri); \
	(t)->t_pri = __new_pri;		\
}

/*
 * Macro to indicate that a thread's priority is about to be changed.
 */
#define	THREAD_WILLCHANGE_PRI(t, pri) {	\
	DTRACE_SCHED2(change__pri, kthread_t *, (t), pri_t, (pri));	\
}
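/*
 * Illustrative sketch (not part of the original header): a priority
 * change must be made with the thread locked, e.g. this hypothetical
 * boost that also moves the thread to the front of its dispatch queue:
 *
 *	thread_lock(t);
 *	(void) thread_change_pri(t, new_pri, 1);
 *	thread_unlock(t);
 *
 * Similarly, code holding a critical resource brackets that region with
 * THREAD_KPRI_REQUEST() / THREAD_KPRI_RELEASE().
 */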
/*
 * Macros to change thread state and the associated lock.
 */
#define	THREAD_SET_STATE(tp, state, lp)	\
		((tp)->t_state = state, (tp)->t_lockp = lp)

/*
 * Point it at the transition lock, which is always held.
 * The previously held lock is dropped.
 */
#define	THREAD_TRANSITION(tp)	thread_transition(tp);
/*
 * Set the thread's lock to be the transition lock, without dropping
 * the previously held lock.
 */
#define	THREAD_TRANSITION_NOLOCK(tp)	((tp)->t_lockp = &transition_lock)

/*
 * Put thread in run state, and set the lock pointer to the dispatcher queue
 * lock pointer provided.  This lock should be held.
 */
#define	THREAD_RUN(tp, lp)	THREAD_SET_STATE(tp, TS_RUN, lp)

/*
 * Put thread in run state, and set the lock pointer to the dispatcher queue
 * lock pointer provided (i.e., the "swapped_lock").  This lock should be held.
 */
#define	THREAD_SWAP(tp, lp)	THREAD_SET_STATE(tp, TS_RUN, lp)

/*
 * Put the thread in zombie state and set the lock pointer to NULL.
 * The NULL will catch anything that tries to lock a zombie.
 */
#define	THREAD_ZOMB(tp)	THREAD_SET_STATE(tp, TS_ZOMB, NULL)

/*
 * Set the thread into ONPROC state, and point the lock at the CPU's
 * lock for the onproc thread(s).  This lock should be held, so the
 * thread does not become unlocked, since these stores can be reordered.
 */
#define	THREAD_ONPROC(tp, cpu)	\
		THREAD_SET_STATE(tp, TS_ONPROC, &(cpu)->cpu_thread_lock)

/*
 * Set the thread into the TS_SLEEP state, and set the lock pointer to
 * some sleep queue's lock.  The new lock should already be held.
 */
#define	THREAD_SLEEP(tp, lp)	{				\
	disp_lock_t	*tlp;					\
	tlp = (tp)->t_lockp;					\
	THREAD_SET_STATE(tp, TS_SLEEP, lp);			\
	disp_lock_exit_high(tlp);				\
}

/*
 * Interrupt threads are created in TS_FREE state, and their lock
 * points at the associated CPU's lock.
 */
#define	THREAD_FREEINTR(tp, cpu)	\
		THREAD_SET_STATE(tp, TS_FREE, &(cpu)->cpu_thread_lock)


#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_THREAD_H */