/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_PROC_H_
#define	_SYS_PROC_H_

#include <sys/callout.h>		/* For struct callout. */
#include <sys/event.h>			/* For struct klist. */
#ifdef _KERNEL
#include <sys/_eventhandler.h>
#endif
#include <sys/_exterr.h>
#include <sys/condvar.h>
#ifndef _KERNEL
#include <sys/filedesc.h>
#endif
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/lock_profile.h>
#include <sys/_mutex.h>
#include <sys/osd.h>
#include <sys/priority.h>
#include <sys/rtprio.h>			/* XXX. */
#include <sys/resource.h>
#include <sys/sigio.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#ifndef _KERNEL
#include <sys/time.h>			/* For structs itimerval, timeval. */
#else
#include <sys/pcpu.h>
#include <sys/systm.h>
#endif
#include <sys/ucontext.h>
#include <sys/ucred.h>
#include <sys/types.h>
#include <sys/_domainset.h>

#include <machine/proc.h>		/* Machine-dependent proc substruct. */
#ifdef _KERNEL
#include <machine/cpu.h>
#endif

/*
 * One structure allocated per session.
 *
 * List of locks
 * (m)		locked by s_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct session {
	u_int		s_count;	/* Ref cnt; pgrps in session - atomic. */
	struct proc	*s_leader;	/* (m + e) Session leader. */
	struct vnode	*s_ttyvp;	/* (m) Vnode of controlling tty. */
	struct cdev_priv *s_ttydp;	/* (m) Device of controlling tty. */
	struct tty	*s_ttyp;	/* (e) Controlling tty. */
	pid_t		s_sid;		/* (c) Session ID. */
					/* (m) Setlogin() name: */
	char		s_login[roundup(MAXLOGNAME, sizeof(long))];
	struct mtx	s_mtx;		/* Mutex to protect members. */
};

/*
 * One structure allocated per process group.
 *
 * List of locks
 * (m)		locked by pg_mtx mtx
 * (e)		locked by proctree_lock sx
 * (c)		const until freeing
 */
struct pgrp {
	LIST_ENTRY(pgrp) pg_hash;	/* (e) Hash chain. */
	LIST_HEAD(, proc) pg_members;	/* (m + e) Pointer to pgrp members. */
	struct session	*pg_session;	/* (c) Pointer to session. */
	struct sigiolst	pg_sigiolst;	/* (m) List of sigio sources. */
	pid_t		pg_id;		/* (c) Process group id. */
	struct mtx	pg_mtx;		/* Mutex to protect members */
	int		pg_flags;	/* (m) PGRP_ flags */
	struct sx	pg_killsx;	/* Mutual exclusion between group member
					 * fork() and killpg() */
};

#define	PGRP_ORPHANED	0x00000001	/* Group is orphaned */

/*
 * pargs, used to hold a copy of the command line, if it had a sane length.
 */
struct pargs {
	u_int	ar_ref;		/* Reference count. */
	u_int	ar_length;	/* Length. */
	u_char	ar_args[1];	/* Arguments. */
};

/*-
 * Description of a process.
 *
 * This structure contains the information needed to manage a thread of
 * control, known in UN*X as a process; it has references to substructures
 * containing descriptions of things that the process uses, but may share
 * with related processes.  The process structure and the substructures
 * are always addressable except for those marked "(CPU)" below,
 * which might be addressable only on a processor on which the process
 * is running.
 *
 * Below is a key of locks used to protect each member of struct proc.  The
 * lock is indicated by a reference to a specific character in parens in the
 * associated comment.
 *      * - not yet protected
 *      a - only touched by curproc or parent during fork/wait
 *      b - created at fork, never changes
 *		(exception aiods switch vmspaces, but they are also
 *		marked 'P_SYSTEM' so hopefully it will be left alone)
 *      c - locked by proc mtx
 *      d - locked by allproc_lock lock
 *      e - locked by proctree_lock lock
 *      f - session mtx
 *      g - process group mtx
 *      h - callout_lock mtx
 *      i - by curproc or the master session mtx
 *      j - locked by proc slock
 *      k - only accessed by curthread
 *      k*- only accessed by curthread and from an interrupt
 *      kx- only accessed by curthread and by debugger
 *      l - the attaching proc or attaching proc parent
 *      n - not locked, lazy
 *      o - ktrace lock
 *      q - td_contested lock
 *      r - p_peers lock
 *      s - see sleepq_switch(), sleeping_on_old_rtc(), and sleep(9)
 *      t - thread lock
 *      u - process stat lock
 *      w - process timer lock
 *      x - created at fork, only changes during single threading in exec
 *      y - created at first aio, doesn't change until exit or exec at which
 *          point we are single-threaded and only curthread changes it
 *
 * If the locking key specifies two identifiers (for example, p_pptr) then
 * either lock is sufficient for read access, but both locks must be held
 * for write access.
 */
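
/*
 * Illustrative sketch of the dual-key rule above, using p_pptr, which is
 * annotated (c + e) in struct proc below.  A reader needs only one of the
 * two locks; writers (e.g. proc_reparent()) must hold both the proc mutex
 * and proctree_lock exclusively.  The function name here is hypothetical;
 * PROC_LOCK()/PROC_UNLOCK() and proctree_lock are declared later in this
 * file.
 *
 *	pid_t
 *	example_parent_pid(struct proc *p)
 *	{
 *		pid_t ppid;
 *
 *		PROC_LOCK(p);		// either the proc mutex ...
 *		ppid = p->p_pptr->p_pid;
 *		PROC_UNLOCK(p);		// ... or sx_slock(&proctree_lock)
 *		return (ppid);		//     would have been sufficient
 *	}
 */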

struct cpuset;
struct filecaps;
struct filemon;
struct kaioinfo;
struct kaudit_record;
struct kcov_info;
struct kdtrace_proc;
struct kdtrace_thread;
struct kmsan_td;
struct kq_timer_cb_data;
struct mqueue_notifier;
struct p_sched;
struct proc;
struct procdesc;
struct racct;
struct sbuf;
struct sleepqueue;
struct socket;
struct td_sched;
struct thread;
struct trapframe;
struct turnstile;
struct vm_map;
struct vm_map_entry;
struct epoch_tracker;

struct syscall_args {
	u_int code;
	u_int original_code;
	struct sysent *callp;
	register_t args[8];
};

/*
 * XXX: Does this belong in resource.h or resourcevar.h instead?
 * Resource usage extension.  The times in rusage structs in the kernel are
 * never up to date.  The actual times are kept as runtimes and tick counts
 * (with control info in the "previous" times), and are converted when
 * userland asks for rusage info.  Backwards compatibility prevents putting
 * this directly in the user-visible rusage struct.
 *
 * Locking for p_rux: (cu) means (u) for p_rux and (c) for p_crux.
 * Locking for td_rux: (t) for all fields.
 */
struct rusage_ext {
	uint64_t	rux_runtime;	/* (cu) Real time. */
	uint64_t	rux_uticks;	/* (cu) Statclock hits in user mode. */
	uint64_t	rux_sticks;	/* (cu) Statclock hits in sys mode. */
	uint64_t	rux_iticks;	/* (cu) Statclock hits in intr mode. */
	uint64_t	rux_uu;		/* (c) Previous user time in usec. */
	uint64_t	rux_su;		/* (c) Previous sys time in usec. */
	uint64_t	rux_tu;		/* (c) Previous total time in usec. */
};

/*
 * Kernel runnable context (thread).
 * This is what is put to sleep and reactivated.
 * Thread context.  Processes may have multiple threads.
 */
struct thread {
	struct mtx	*volatile td_lock; /* replaces sched lock */
	struct proc	*td_proc;	/* (*) Associated process. */
	TAILQ_ENTRY(thread) td_plist;	/* (*) All threads in this proc. */
	TAILQ_ENTRY(thread) td_runq;	/* (t) Run queue. */
	union {
		TAILQ_ENTRY(thread) td_slpq;	/* (t) Sleep queue. */
		struct thread *td_zombie; /* Zombie list linkage */
	};
	TAILQ_ENTRY(thread) td_lockq;	/* (t) Lock queue. */
	LIST_ENTRY(thread) td_hash;	/* (d) Hash chain. */
	struct cpuset	*td_cpuset;	/* (t) CPU affinity mask. */
	struct domainset_ref td_domain;	/* (a) NUMA policy */
	struct seltd	*td_sel;	/* Select queue/channel. */
	struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
	struct turnstile *td_turnstile;	/* (k) Associated turnstile. */
	void		*td_pad1;	/* Available */
	struct umtx_q	*td_umtxq;	/* (c?) Link for when we're blocked. */
	lwpid_t		td_tid;		/* (b) Thread ID. */
	sigqueue_t	td_sigqueue;	/* (c) Sigs arrived, not delivered. */
#define	td_siglist	td_sigqueue.sq_signals
	u_char		td_lend_user_pri; /* (t) Lend user pri. */
	u_char		td_allocdomain;	/* (b) NUMA domain backing this struct thread. */
	u_char		td_base_ithread_pri; /* (t) Base ithread pri */
	struct kmsan_td	*td_kmsan;	/* (k) KMSAN state */

/* Cleared during fork1(), thread_create(), or kthread_add(). */
#define	td_startzero td_flags
	int		td_flags;	/* (t) TDF_* flags. */
	int		td_ast;		/* (t) TDA_* indicators */
	int		td_inhibitors;	/* (t) Why can not run. */
	int		td_pflags;	/* (k) Private thread (TDP_*) flags. */
	int		td_pflags2;	/* (k) Private thread (TDP2_*) flags. */
	int		td_dupfd;	/* (k) Ret value from fdopen. XXX */
	int		td_sqqueue;	/* (t) Sleepqueue queue blocked on. */
	const void	*td_wchan;	/* (t) Sleep address. */
	const char	*td_wmesg;	/* (t) Reason for sleep. */
	volatile u_char td_owepreempt;	/* (k*) Preempt on last critical_exit */
	u_char		td_tsqueue;	/* (t) Turnstile queue blocked on. */
	u_char		_td_pad0[2];	/* Available. */
	int		td_locks;	/* (k) Debug: count of non-spin locks */
	int		td_rw_rlocks;	/* (k) Count of rwlock read locks. */
	int		td_sx_slocks;	/* (k) Count of sx shared locks. */
	int		td_lk_slocks;	/* (k) Count of lockmgr shared locks. */
	struct lock_object *td_wantedlock; /* (k) Lock we are contending on */
	struct turnstile *td_blocked;	/* (t) Lock thread is blocked on. */
	const char	*td_lockname;	/* (t) Name of lock blocked on. */
	LIST_HEAD(, turnstile) td_contested;	/* (q) Contested locks. */
	struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
	int		td_intr_nesting_level; /* (k) Interrupt recursion. */
	int		td_pinned;	/* (k) Temporary cpu pin count. */
	struct ucred	*td_realucred;	/* (k) Reference to credentials. */
	struct ucred	*td_ucred;	/* (k) Used credentials, temporarily switchable. */
	struct plimit	*td_limit;	/* (k) Resource limits. */
	int		td_slptick;	/* (t) Time at sleep. */
	int		td_blktick;	/* (t) Time spent blocked. */
	int		td_swvoltick;	/* (t) Time at last SW_VOL switch. */
	int		td_swinvoltick;	/* (t) Time at last SW_INVOL switch. */
	u_int		td_cow;		/* (*) Number of copy-on-write faults */
	struct rusage	td_ru;		/* (t) rusage information. */
	struct rusage_ext td_rux;	/* (t) Internal rusage information. */
	uint64_t	td_incruntime;	/* (t) Cpu ticks to transfer to proc. */
	uint64_t	td_runtime;	/* (t) How many cpu ticks we've run. */
	u_int		td_pticks;	/* (t) Statclock hits for profiling */
	u_int		td_sticks;	/* (t) Statclock hits in system mode. */
	u_int		td_iticks;	/* (t) Statclock hits in intr mode. */
	u_int		td_uticks;	/* (t) Statclock hits in user mode. */
	int		td_intrval;	/* (t) Return value for sleepq. */
	sigset_t	td_oldsigmask;	/* (k) Saved mask from pre sigpause. */
	volatile u_int	td_generation;	/* (k) For detection of preemption */
	stack_t		td_sigstk;	/* (k) Stack ptr and on-stack flag. */
	int		td_xsig;	/* (c) Signal for ptrace */
	u_long		td_profil_addr;	/* (k) Temporary addr until AST. */
	u_int		td_profil_ticks; /* (k) Temporary ticks until AST. */
	char		td_name[MAXCOMLEN + 1];	/* (*) Thread name. */
	struct file	*td_fpop;	/* (k) file referencing cdev under op */
	int		td_dbgflags;	/* (c) Userland debugger flags */
	siginfo_t	td_si;		/* (c) For debugger or core file */
	int		td_ng_outbound;	/* (k) Thread entered ng from above. */
	struct osd	td_osd;		/* (k) Object specific data. */
	struct vm_map_entry *td_map_def_user; /* (k) Deferred entries. */
	pid_t		td_dbg_forked;	/* (c) Child pid for debugger. */
	u_int		td_no_sleeping;	/* (k) Sleeping disabled count. */
	struct vnode	*td_vp_reserved;/* (k) Preallocated vnode. */
	void		*td_su;		/* (k) FFS SU private */
	sbintime_t	td_sleeptimo;	/* (t) Sleep timeout. */
	int		td_rtcgen;	/* (s) rtc_generation of abs. sleep */
	int		td_errno;	/* (k) Error from last syscall. */
	size_t		td_vslock_sz;	/* (k) amount of vslock-ed space */
	struct kcov_info *td_kcov_info;	/* (*) Kernel code coverage data */
	long		td_ucredref;	/* (k) references on td_realucred */
	struct kexterr	td_kexterr;
#define	td_endzero td_sigmask

/* Copied during fork1(), thread_create(), or kthread_add(). */
#define	td_startcopy td_endzero
	sigset_t	td_sigmask;	/* (c) Current signal mask. */
	u_char		td_rqindex;	/* (t) Run queue index. */
	u_char		td_base_pri;	/* (t) Thread base kernel priority. */
	u_char		td_priority;	/* (t) Thread active priority. */
	u_char		td_pri_class;	/* (t) Scheduling class. */
	u_char		td_user_pri;	/* (t) User pri from estcpu and nice. */
	u_char		td_base_user_pri; /* (t) Base user pri */
	uintptr_t	td_rb_list;	/* (k) Robust list head. */
	uintptr_t	td_rbp_list;	/* (k) Robust priv list head. */
	uintptr_t	td_rb_inact;	/* (k) Current in-action mutex loc. */
	struct syscall_args td_sa;	/* (kx) Syscall parameters.  Copied on
					   fork for child tracing. */
	void		*td_sigblock_ptr; /* (k) uptr for fast sigblock. */
	uint32_t	td_sigblock_val; /* (k) fast sigblock value read at
					    td_sigblock_ptr on kern entry */
	void		*td_exterr_ptr;
#define	td_endcopy td_pcb

/*
 * Fields that must be manually set in fork1(), thread_create(), kthread_add(),
 * or already have been set in the allocator, constructor, etc.
 */
	struct pcb	*td_pcb;	/* (k) Kernel VA of pcb and kstack. */
	enum td_states {
		TDS_INACTIVE = 0x0,
		TDS_INHIBITED,
		TDS_CAN_RUN,
		TDS_RUNQ,
		TDS_RUNNING
	} td_state;			/* (t) thread state */
	/* Note: td_state must be accessed using TD_{GET,SET}_STATE(). */
	union {
		syscallarg_t	tdu_retval[2];
		off_t		tdu_off;
	} td_uretoff;			/* (k) Syscall aux returns. */
#define	td_retval	td_uretoff.tdu_retval
	u_int		td_cowgen;	/* (k) Generation of COW pointers. */
	/* LP64 hole */
	struct callout	td_slpcallout;	/* (h) Callout for sleep. */
	struct trapframe *td_frame;	/* (k) */
	vm_offset_t	td_kstack;	/* (a) Kernel VA of kstack. */
	u_short		td_kstack_pages; /* (a) Size of the kstack. */
	u_short		td_kstack_domain; /* (a) Domain backing kstack KVA. */
	volatile u_int	td_critnest;	/* (k*) Critical section nest level. */
	struct mdthread td_md;		/* (k) Any machine-dependent fields. */
	struct kaudit_record *td_ar;	/* (k) Active audit record, if any. */
	struct lpohead	td_lprof[2];	/* (a) lock profiling objects. */
	struct kdtrace_thread *td_dtrace; /* (*) DTrace-specific data. */
	struct vnet	*td_vnet;	/* (k) Effective vnet. */
	const char	*td_vnet_lpush;	/* (k) Debugging vnet push / pop. */
	struct trapframe *td_intr_frame;/* (k) Frame of the current irq */
	struct proc	*td_rfppwait_p;	/* (k) The vforked child */
	struct vm_page	**td_ma;	/* (k) uio pages held */
	int		td_ma_cnt;	/* (k) size of *td_ma */
	/* LP64 hole */
	void		*td_emuldata;	/* Emulator state data */
	int		td_lastcpu;	/* (t) Last cpu we were on. */
	int		td_oncpu;	/* (t) Which cpu we are on. */
	void		*td_lkpi_task;	/* LinuxKPI task struct pointer */
	int		td_pmcpend;
	void		*td_remotereq;	/* (c) dbg remote request. */
	off_t		td_ktr_io_lim;	/* (k) limit for ktrace file size */
#ifdef EPOCH_TRACE
	SLIST_HEAD(, epoch_tracker) td_epochs;
#endif
};

struct thread0_storage {
	struct thread t0st_thread;
	uint64_t t0st_sched[10];
};

struct mtx *thread_lock_block(struct thread *);
void	thread_lock_block_wait(struct thread *);
void	thread_lock_set(struct thread *, struct mtx *);
void	thread_lock_unblock(struct thread *, struct mtx *);
#define	THREAD_LOCK_ASSERT(td, type)					\
	mtx_assert((td)->td_lock, (type))

#define	THREAD_LOCK_BLOCKED_ASSERT(td, type)				\
do {									\
	struct mtx *__m = (td)->td_lock;				\
	if (__m != &blocked_lock)					\
		mtx_assert(__m, (type));				\
} while (0)

#ifdef INVARIANTS
#define	THREAD_LOCKPTR_ASSERT(td, lock)					\
do {									\
	struct mtx *__m;						\
	__m = (td)->td_lock;						\
	KASSERT(__m == (lock),						\
	    ("Thread %p lock %p does not match %p", td, __m, (lock)));	\
} while (0)

#define	THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock)				\
do {									\
	struct mtx *__m;						\
	__m = (td)->td_lock;						\
	KASSERT(__m == (lock) || __m == &blocked_lock,			\
	    ("Thread %p lock %p does not match %p", td, __m, (lock)));	\
} while (0)

#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td) do {						\
	KASSERT(SCHEDULER_STOPPED() || (td)->td_locks > 0,		\
	    ("Thread %p owns no locks", (td)));				\
	(td)->td_locks--;						\
} while (0)
#else
#define	THREAD_LOCKPTR_ASSERT(td, lock)
#define	THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock)

#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#endif

/*
 * Flags kept in td_flags:
 * To change these you MUST have the scheduler lock.
 */
#define	TDF_BORROWING	0x00000001 /* Thread is borrowing pri from another. */
#define	TDF_INPANIC	0x00000002 /* Caused a panic, let it drive crashdump. */
#define	TDF_INMEM	0x00000004 /* Thread's stack is in memory. */
#define	TDF_SINTR	0x00000008 /* Sleep is interruptible. */
#define	TDF_TIMEOUT	0x00000010 /* Timing out during sleep. */
#define	TDF_IDLETD	0x00000020 /* This is a per-CPU idle thread. */
#define	TDF_UNUSED11	0x00000040 /* Available */
#define	TDF_SIGWAIT	0x00000080 /* Ignore ignored signals */
#define	TDF_KTH_SUSP	0x00000100 /* kthread is suspended */
#define	TDF_ALLPROCSUSP	0x00000200 /* suspended by SINGLE_ALLPROC */
#define	TDF_BOUNDARY	0x00000400 /* Thread suspended at user boundary */
#define	TDF_UNUSED1	0x00000800 /* Available */
#define	TDF_UNUSED2	0x00001000 /* Available */
#define	TDF_SBDRY	0x00002000 /* Stop only on usermode boundary. */
#define	TDF_UPIBLOCKED	0x00004000 /* Thread blocked on user PI mutex. */
#define	TDF_UNUSED3	0x00008000 /* Available */
#define	TDF_UNUSED4	0x00010000 /* Available */
#define	TDF_UNUSED5	0x00020000 /* Available */
#define	TDF_NOLOAD	0x00040000 /* Ignore during load avg calculations. */
#define	TDF_SERESTART	0x00080000 /* ERESTART on stop attempts. */
#define	TDF_THRWAKEUP	0x00100000 /* Libthr thread must not suspend itself. */
#define	TDF_SEINTR	0x00200000 /* EINTR on stop attempts. */
#define	TDF_UNUSED12	0x00400000 /* Available */
#define	TDF_UNUSED6	0x00800000 /* Available */
#define	TDF_SCHED0	0x01000000 /* Reserved for scheduler private use */
#define	TDF_SCHED1	0x02000000 /* Reserved for scheduler private use */
#define	TDF_SCHED2	0x04000000 /* Reserved for scheduler private use */
#define	TDF_SCHED3	0x08000000 /* Reserved for scheduler private use */
#define	TDF_UNUSED7	0x10000000 /* Available */
#define	TDF_UNUSED8	0x20000000 /* Available */
#define	TDF_UNUSED9	0x40000000 /* Available */
#define	TDF_UNUSED10	0x80000000 /* Available */

enum {
	TDA_AST = 0,		/* Special: call all non-flagged AST handlers */
	TDA_OWEUPC,
	TDA_HWPMC,
	TDA_VFORK,
	TDA_ALRM,
	TDA_PROF,
	TDA_MAC,
	TDA_SCHED,
	TDA_UFS,
	TDA_GEOM,
	TDA_KQUEUE,
	TDA_RACCT,
	TDA_MOD1,		/* For third party use, before signals are */
	TDA_MOD2,		/* processed .. */
	TDA_PSELECT,		/* For discarding temporary signal mask */
	TDA_SIG,
	TDA_KTRACE,
	TDA_SUSPEND,
	TDA_SIGSUSPEND,
	TDA_MOD3,		/* .. and after */
	TDA_MOD4,
	TDA_MAX,
};
#define	TDAI(tda)		(1U << (tda))
#define	td_ast_pending(td, tda)	((td->td_ast & TDAI(tda)) != 0)

/* Userland debug flags */
#define	TDB_SUSPEND	0x00000001 /* Thread is suspended by debugger */
#define	TDB_XSIG	0x00000002 /* Thread is exchanging signal under trace */
#define	TDB_USERWR	0x00000004 /* Debugger modified memory or registers */
#define	TDB_SCE		0x00000008 /* Thread performs syscall enter */
#define	TDB_SCX		0x00000010 /* Thread performs syscall exit */
#define	TDB_EXEC	0x00000020 /* TDB_SCX from exec(2) family */
#define	TDB_FORK	0x00000040 /* TDB_SCX from fork(2) that created new
				      process */
#define	TDB_STOPATFORK	0x00000080 /* Stop at the return from fork (child
				      only) */
#define	TDB_CHILD	0x00000100 /* New child indicator for ptrace() */
#define	TDB_BORN	0x00000200 /* New LWP indicator for ptrace() */
#define	TDB_EXIT	0x00000400 /* Exiting LWP indicator for ptrace() */
#define	TDB_VFORK	0x00000800 /* vfork indicator for ptrace() */
#define	TDB_FSTP	0x00001000 /* The thread is PT_ATTACH leader */
#define	TDB_STEP	0x00002000 /* (x86) PSL_T set for PT_STEP */
#define	TDB_SSWITCH	0x00004000 /* Suspended in ptracestop */
#define	TDB_BOUNDARY	0x00008000 /* ptracestop() at boundary */
#define	TDB_COREDUMPREQ	0x00010000 /* Coredump request */
#define	TDB_SCREMOTEREQ	0x00020000 /* Remote syscall request */

/*
 * "Private" flags kept in td_pflags:
 * These are only written by curthread and thus need no locking.
 */
#define	TDP_OLDMASK	0x00000001 /* Need to restore mask after suspend. */
#define	TDP_INKTR	0x00000002 /* Thread is currently in KTR code. */
#define	TDP_INKTRACE	0x00000004 /* Thread is currently in KTRACE code. */
#define	TDP_BUFNEED	0x00000008 /* Do not recurse into the buf flush */
#define	TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
#define	TDP_ALTSTACK	0x00000020 /* Have alternate signal stack. */
#define	TDP_DEADLKTREAT	0x00000040 /* Lock acquisition - deadlock treatment. */
#define	TDP_NOFAULTING	0x00000080 /* Do not handle page faults. */
#define	TDP_SIGFASTBLOCK 0x00000100 /* Fast sigblock active */
#define	TDP_OWEUPC	0x00000200 /* Call addupc() at next AST. */
#define	TDP_ITHREAD	0x00000400 /* Thread is an interrupt thread. */
#define	TDP_SYNCIO	0x00000800 /* Local override, disable async i/o. */
#define	TDP_SCHED1	0x00001000 /* Reserved for scheduler private use */
#define	TDP_SCHED2	0x00002000 /* Reserved for scheduler private use */
#define	TDP_SCHED3	0x00004000 /* Reserved for scheduler private use */
#define	TDP_SCHED4	0x00008000 /* Reserved for scheduler private use */
#define	TDP_GEOM	0x00010000 /* Settle GEOM before finishing syscall */
#define	TDP_SOFTDEP	0x00020000 /* Stuck processing softdep worklist */
#define	TDP_NORUNNINGBUF 0x00040000 /* Ignore runningbufspace check */
#define	TDP_WAKEUP	0x00080000 /* Don't sleep in umtx cond_wait */
#define	TDP_INBDFLUSH	0x00100000 /* Already in BO_BDFLUSH, do not recurse */
#define	TDP_KTHREAD	0x00200000 /* This is an official kernel thread */
#define	TDP_CALLCHAIN	0x00400000 /* Capture thread's callchain */
#define	TDP_IGNSUSP	0x00800000 /* Permission to ignore the MNTK_SUSPEND* */
#define	TDP_AUDITREC	0x01000000 /* Audit record pending on thread */
#define	TDP_RFPPWAIT	0x02000000 /* Handle RFPPWAIT on syscall exit */
#define	TDP_RESETSPUR	0x04000000 /* Reset spurious page fault history. */
#define	TDP_NERRNO	0x08000000 /* Last errno is already in td_errno */
#define	TDP_UIOHELD	0x10000000 /* Current uio has pages held in td_ma */
#define	TDP_EFIRT	0x20000000 /* In firmware (EFI RT) call */
#define	TDP_EXECVMSPC	0x40000000 /* Execve destroyed old vmspace */
#define	TDP_SIGFASTPENDING 0x80000000 /* Pending signal due to sigfastblock */

#define	TDP2_SBPAGES	0x00000001 /* Owns sbusy on some pages */
#define	TDP2_COMPAT32RB	0x00000002 /* compat32 ABI for robust lists */
#define	TDP2_ACCT	0x00000004 /* Doing accounting */
#define	TDP2_SAN_QUIET	0x00000008 /* Disable warnings from K(A|M)SAN */
#define	TDP2_EXTERR	0x00000010 /* Kernel reported ext error */
#define	TDP2_UEXTERR	0x00000020 /* User set ext error reporting ptr */

/*
 * Reasons that the current thread can not be run yet.
 * More than one may apply.
 */
#define	TDI_SUSPENDED	0x0001	/* On suspension queue. */
#define	TDI_SLEEPING	0x0002	/* Actually asleep! (tricky). */
#define	TDI_LOCK	0x0008	/* Stopped on a lock. */
#define	TDI_IWAIT	0x0010	/* Awaiting interrupt. */

#define	TD_IS_SLEEPING(td)	((td)->td_inhibitors & TDI_SLEEPING)
#define	TD_ON_SLEEPQ(td)	((td)->td_wchan != NULL)
#define	TD_IS_SUSPENDED(td)	((td)->td_inhibitors & TDI_SUSPENDED)
#define	TD_ON_LOCK(td)		((td)->td_inhibitors & TDI_LOCK)
#define	TD_AWAITING_INTR(td)	((td)->td_inhibitors & TDI_IWAIT)
#ifdef _KERNEL
#define	TD_GET_STATE(td)	atomic_load_int(&(td)->td_state)
#else
#define	TD_GET_STATE(td)	((td)->td_state)
#endif
#define	TD_IS_RUNNING(td)	(TD_GET_STATE(td) == TDS_RUNNING)
#define	TD_ON_RUNQ(td)		(TD_GET_STATE(td) == TDS_RUNQ)
#define	TD_CAN_RUN(td)		(TD_GET_STATE(td) == TDS_CAN_RUN)
#define	TD_IS_INHIBITED(td)	(TD_GET_STATE(td) == TDS_INHIBITED)
#define	TD_ON_UPILOCK(td)	((td)->td_flags & TDF_UPIBLOCKED)
#define	TD_IS_IDLETHREAD(td)	((td)->td_flags & TDF_IDLETD)

#define	TD_CAN_ABORT(td)	(TD_ON_SLEEPQ((td)) &&			\
				    ((td)->td_flags & TDF_SINTR) != 0)

#define	KTDSTATE(td)							\
	(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep"  :	\
	((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" :	\
	((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" :		\
	((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")
"iwait" : "yielding") 611 612 #define TD_SET_INHIB(td, inhib) do { \ 613 TD_SET_STATE(td, TDS_INHIBITED); \ 614 (td)->td_inhibitors |= (inhib); \ 615 } while (0) 616 617 #define TD_CLR_INHIB(td, inhib) do { \ 618 if (((td)->td_inhibitors & (inhib)) && \ 619 (((td)->td_inhibitors &= ~(inhib)) == 0)) \ 620 TD_SET_STATE(td, TDS_CAN_RUN); \ 621 } while (0) 622 623 #define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING) 624 #define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK) 625 #define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED) 626 #define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT) 627 #define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING) 628 629 #define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING) 630 #define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK) 631 #define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED) 632 #define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT) 633 634 #ifdef _KERNEL 635 #define TD_SET_STATE(td, state) atomic_store_int(&(td)->td_state, state) 636 #else 637 #define TD_SET_STATE(td, state) (td)->td_state = state 638 #endif 639 #define TD_SET_RUNNING(td) TD_SET_STATE(td, TDS_RUNNING) 640 #define TD_SET_RUNQ(td) TD_SET_STATE(td, TDS_RUNQ) 641 #define TD_SET_CAN_RUN(td) TD_SET_STATE(td, TDS_CAN_RUN) 642 643 644 #define TD_SBDRY_INTR(td) \ 645 (((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0) 646 #define TD_SBDRY_ERRNO(td) \ 647 (((td)->td_flags & TDF_SEINTR) != 0 ? EINTR : ERESTART) 648 649 /* 650 * Process structure. 651 */ 652 struct proc { 653 LIST_ENTRY(proc) p_list; /* (d) List of all processes. */ 654 TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */ 655 struct mtx p_slock; /* process spin lock */ 656 struct ucred *p_ucred; /* (c) Process owner's identity. */ 657 struct filedesc *p_fd; /* (b) Open files. */ 658 struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */ 659 struct pwddesc *p_pd; /* (b) Cwd, chroot, jail, umask */ 660 struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */ 661 struct plimit *p_limit; /* (c) Resource limits. */ 662 struct callout p_limco; /* (c) Limit callout handle */ 663 struct sigacts *p_sigacts; /* (x) Signal actions, state (CPU). */ 664 665 int p_flag; /* (c) P_* flags. */ 666 int p_flag2; /* (c) P2_* flags. */ 667 enum p_states { 668 PRS_NEW = 0, /* In creation */ 669 PRS_NORMAL, /* threads can be run. */ 670 PRS_ZOMBIE 671 } p_state; /* (j/c) Process status. */ 672 pid_t p_pid; /* (b) Process identifier. */ 673 LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */ 674 LIST_ENTRY(proc) p_pglist; /* (g + e) List of processes in pgrp. */ 675 struct proc *p_pptr; /* (c + e) Pointer to parent process. */ 676 LIST_ENTRY(proc) p_sibling; /* (e) List of sibling processes. */ 677 LIST_HEAD(, proc) p_children; /* (e) Pointer to list of children. */ 678 struct proc *p_reaper; /* (e) My reaper. */ 679 LIST_HEAD(, proc) p_reaplist; /* (e) List of my descendants 680 (if I am reaper). */ 681 LIST_ENTRY(proc) p_reapsibling; /* (e) List of siblings - descendants of 682 the same reaper. */ 683 struct mtx p_mtx; /* (n) Lock for this struct. */ 684 struct mtx p_statmtx; /* Lock for the stats */ 685 struct mtx p_itimmtx; /* Lock for the virt/prof timers */ 686 struct mtx p_profmtx; /* Lock for the profiling */ 687 struct ksiginfo *p_ksi; /* Locked by parent proc lock */ 688 sigqueue_t p_sigqueue; /* (c) Sigs not delivered to a td. */ 689 #define p_siglist p_sigqueue.sq_signals 690 pid_t p_oppid; /* (c + e) Real parent pid. */ 691 692 /* The following fields are all zeroed upon creation in fork. 
#define	p_startzero	p_vmspace
	struct vmspace	*p_vmspace;	/* (b) Address space. */
	u_int		p_swtick;	/* (c) Tick when swapped in or out. */
	u_int		p_cowgen;	/* (c) Generation of COW pointers. */
	struct itimerval p_realtimer;	/* (c) Alarm timer. */
	struct rusage	p_ru;		/* (a) Exit information. */
	struct rusage_ext p_rux;	/* (cu) Internal resource usage. */
	struct rusage_ext p_crux;	/* (c) Internal child resource usage. */
	int		p_profthreads;	/* (c) Num threads in addupc_task. */
	volatile int	p_exitthreads;	/* (j) Number of threads exiting */
	int		p_traceflag;	/* (o) Kernel trace points. */
	struct ktr_io_params *p_ktrioparms; /* (c + o) Params for ktrace. */
	struct vnode	*p_textvp;	/* (b) Vnode of executable. */
	struct vnode	*p_textdvp;	/* (b) Dir containing textvp. */
	char		*p_binname;	/* (b) Binary hardlink name. */
	u_int		p_lock;		/* (c) Prevent exit. */
	struct sigiolst	p_sigiolst;	/* (c) List of sigio sources. */
	int		p_sigparent;	/* (c) Signal to parent on exit. */
	int		p_sig;		/* (n) For core dump/debugger XXX. */
	u_int		p_ptevents;	/* (c + e) ptrace() event mask. */
	struct kaioinfo	*p_aioinfo;	/* (y) ASYNC I/O info. */
	struct thread	*p_singlethread;/* (c + j) If single threading this is it */
	int		p_suspcount;	/* (j) Num threads in suspended mode. */
	struct thread	*p_xthread;	/* (c) Trap thread */
	int		p_boundary_count;/* (j) Num threads at user boundary */
	int		p_pendingcnt;	/* (c) how many signals are pending */
	struct itimers	*p_itimers;	/* (c) POSIX interval timers. */
	struct procdesc	*p_procdesc;	/* (e) Process descriptor, if any. */
	u_int		p_treeflag;	/* (e) P_TREE flags */
	int		p_pendingexits;	/* (c) Count of pending thread exits. */
	struct filemon	*p_filemon;	/* (c) filemon-specific data. */
	int		p_pdeathsig;	/* (c) Signal from parent on exit. */
/* End area that is zeroed on creation. */
#define	p_endzero	p_magic

/* The following fields are all copied upon creation in fork. */
#define	p_startcopy	p_endzero
	u_int		p_magic;	/* (b) Magic number. */
	int		p_osrel;	/* (x) osreldate for the
					   binary (from ELF note, if any) */
	uint32_t	p_fctl0;	/* (x) ABI feature control, ELF note */
	char		p_comm[MAXCOMLEN + 1];	/* (x) Process name. */
	struct sysentvec *p_sysent;	/* (b) Syscall dispatch info. */
	struct pargs	*p_args;	/* (c) Process arguments. */
	rlim_t		p_cpulimit;	/* (c) Current CPU limit in seconds. */
	signed char	p_nice;		/* (c) Process "nice" value. */
	int		p_fibnum;	/* in this routing domain XXX MRT */
	pid_t		p_reapsubtree;	/* (e) Pid of the direct child of the
					   reaper which spawned
					   our subtree. */
	uint64_t	p_elf_flags;	/* (x) ELF flags */
	void		*p_elf_brandinfo; /* (x) Elf_Brandinfo, NULL for
					     non ELF binaries. */
	sbintime_t	p_umtx_min_timeout;
/* End area that is copied on creation. */
#define	p_endcopy	p_xexit

	u_int		p_xexit;	/* (c) Exit code. */
	u_int		p_xsig;		/* (c) Stop/kill sig. */
	struct pgrp	*p_pgrp;	/* (c + e) Pointer to process group. */
	struct knlist	*p_klist;	/* (c) Knotes attached to this proc. */
	int		p_numthreads;	/* (c) Number of threads. */
	struct mdproc	p_md;		/* Any machine-dependent fields. */
	struct callout	p_itcallout;	/* (h + c) Interval timer callout. */
	u_short		p_acflag;	/* (c) Accounting flags. */
	struct proc	*p_peers;	/* (r) */
	struct proc	*p_leader;	/* (b) */
	void		*p_emuldata;	/* (c) Emulator state data. */
	struct label	*p_label;	/* (*) Proc (not subject) MAC label. */
	STAILQ_HEAD(, ktr_request) p_ktr; /* (o) KTR event queue. */
	LIST_HEAD(, mqueue_notifier) p_mqnotifier; /* (c) mqueue notifiers.*/
	struct kdtrace_proc *p_dtrace;	/* (*) DTrace-specific data. */
	struct cv	p_pwait;	/* (*) wait cv for exit/exec. */
	struct racct	*p_racct;	/* (b) Resource accounting. */
	int		p_throttled;	/* (c) Flag for racct pcpu throttling */
	/*
	 * An orphan is the child that has been re-parented to the
	 * debugger as a result of attaching to it.  Need to keep
	 * track of them for parent to be able to collect the exit
	 * status of what used to be children.
	 */
	LIST_ENTRY(proc) p_orphan;	/* (e) List of orphan processes. */
	LIST_HEAD(, proc) p_orphans;	/* (e) Pointer to list of orphans. */

	TAILQ_HEAD(, kq_timer_cb_data) p_kqtim_stop;	/* (c) */
	LIST_ENTRY(proc) p_jaillist;	/* (d) Jail process linkage. */
};

#define	p_session	p_pgrp->pg_session
#define	p_pgid		p_pgrp->pg_id

#define	NOCPU		(-1)	/* For when we aren't on a CPU. */
#define	NOCPU_OLD	(255)
#define	MAXCPU_OLD	(254)

#define	PROC_SLOCK(p)	mtx_lock_spin(&(p)->p_slock)
#define	PROC_SUNLOCK(p)	mtx_unlock_spin(&(p)->p_slock)
#define	PROC_SLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_slock, (type))

#define	PROC_STATLOCK(p)	mtx_lock_spin(&(p)->p_statmtx)
#define	PROC_STATUNLOCK(p)	mtx_unlock_spin(&(p)->p_statmtx)
#define	PROC_STATLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_statmtx, (type))

#define	PROC_ITIMLOCK(p)	mtx_lock_spin(&(p)->p_itimmtx)
#define	PROC_ITIMUNLOCK(p)	mtx_unlock_spin(&(p)->p_itimmtx)
#define	PROC_ITIMLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_itimmtx, (type))

#define	PROC_PROFLOCK(p)	mtx_lock_spin(&(p)->p_profmtx)
#define	PROC_PROFUNLOCK(p)	mtx_unlock_spin(&(p)->p_profmtx)
#define	PROC_PROFLOCK_ASSERT(p, type)	mtx_assert(&(p)->p_profmtx, (type))

/* These flags are kept in p_flag. */
#define	P_ADVLOCK	0x00000001	/* Process may hold a POSIX advisory
					   lock. */
#define	P_CONTROLT	0x00000002	/* Has a controlling terminal. */
#define	P_KPROC		0x00000004	/* Kernel process. */
#define	P_IDLEPROC	0x00000008	/* Container for system idle threads. */
#define	P_PPWAIT	0x00000010	/* Parent is waiting for child to
					   exec/exit. */
#define	P_PROFIL	0x00000020	/* Has started profiling. */
#define	P_STOPPROF	0x00000040	/* Has thread requesting to stop
					   profiling. */
#define	P_HADTHREADS	0x00000080	/* Has had threads (no cleanup
					   shortcuts) */
#define	P_SUGID		0x00000100	/* Had set id privileges since last
					   exec. */
#define	P_SYSTEM	0x00000200	/* System proc: no sigs or stats. */
#define	P_SINGLE_EXIT	0x00000400	/* Threads suspending should exit,
					   not wait. */
#define	P_TRACED	0x00000800	/* Debugged process being traced. */
#define	P_WAITED	0x00001000	/* Someone is waiting for us. */
#define	P_WEXIT		0x00002000	/* Working on exiting. */
#define	P_EXEC		0x00004000	/* Process called exec. */
#define	P_WKILLED	0x00008000	/* Killed, go to kernel/user boundary
					   ASAP. */
#define	P_CONTINUED	0x00010000	/* Proc has continued from a stopped
					   state. */
#define	P_STOPPED_SIG	0x00020000	/* Stopped due to SIGSTOP/SIGTSTP. */
#define	P_STOPPED_TRACE	0x00040000	/* Stopped because of tracing. */
#define	P_STOPPED_SINGLE 0x00080000	/* Only 1 thread can continue (not to
					   user). */
#define	P_PROTECTED	0x00100000	/* Do not kill on memory overcommit. */
#define	P_SIGEVENT	0x00200000	/* Process pending signals changed. */
#define	P_SINGLE_BOUNDARY 0x00400000	/* Threads should suspend at user
					   boundary. */
#define	P_HWPMC		0x00800000	/* Process is using HWPMCs */
#define	P_JAILED	0x01000000	/* Process is in jail. */
#define	P_TOTAL_STOP	0x02000000	/* Stopped in stop_all_proc. */
#define	P_INEXEC	0x04000000	/* Process is in execve(). */
#define	P_STATCHILD	0x08000000	/* Child process stopped or exited. */
#define	P_INMEM		0x10000000	/* Loaded into memory, always set. */
#define	P_UNUSED1	0x20000000	/* --available-- */
#define	P_UNUSED2	0x40000000	/* --available-- */
#define	P_PPTRACE	0x80000000	/* PT_TRACEME by vforked child. */

#define	P_STOPPED	(P_STOPPED_SIG|P_STOPPED_SINGLE|P_STOPPED_TRACE)
#define	P_SHOULDSTOP(p)	((p)->p_flag & P_STOPPED)
#define	P_KILLED(p)	((p)->p_flag & P_WKILLED)

/* These flags are kept in p_flag2. */
#define	P2_INHERIT_PROTECTED	0x00000001	/* New children get
						   P_PROTECTED. */
#define	P2_NOTRACE		0x00000002	/* No ptrace(2) attach or
						   coredumps. */
#define	P2_NOTRACE_EXEC		0x00000004	/* Keep P2_NOTRACE on
						   exec(2). */
#define	P2_AST_SU		0x00000008	/* Handles SU ast for
						   kthreads. */
#define	P2_PTRACE_FSTP		0x00000010	/* SIGSTOP from PT_ATTACH not
						   yet handled. */
#define	P2_TRAPCAP		0x00000020	/* SIGTRAP on ENOTCAPABLE */
#define	P2_ASLR_ENABLE		0x00000040	/* Force enable ASLR. */
#define	P2_ASLR_DISABLE		0x00000080	/* Force disable ASLR. */
#define	P2_ASLR_IGNSTART	0x00000100	/* Enable ASLR to consume sbrk
						   area. */
#define	P2_PROTMAX_ENABLE	0x00000200	/* Force enable implied
						   PROT_MAX. */
#define	P2_PROTMAX_DISABLE	0x00000400	/* Force disable implied
						   PROT_MAX. */
#define	P2_STKGAP_DISABLE	0x00000800	/* Disable stack gap for
						   MAP_STACK */
#define	P2_STKGAP_DISABLE_EXEC	0x00001000	/* Stack gap disabled
						   after exec */
#define	P2_ITSTOPPED		0x00002000	/* itimers stopped */
#define	P2_PTRACEREQ		0x00004000	/* Active ptrace req */
#define	P2_NO_NEW_PRIVS		0x00008000	/* Ignore setuid */
#define	P2_WXORX_DISABLE	0x00010000	/* WX mappings enabled */
#define	P2_WXORX_ENABLE_EXEC	0x00020000	/* WXORX enabled after exec */
#define	P2_WEXIT		0x00040000	/* exit just started, no
						   external thread_single() is
						   permitted */
#define	P2_REAPKILLED		0x00080000	/* REAP_KILL pass touched me */
#define	P2_MEMBAR_PRIVE		0x00100000	/* membar private expedited
						   registered */
#define	P2_MEMBAR_PRIVE_SYNCORE	0x00200000	/* membar private expedited
						   sync core registered */
#define	P2_MEMBAR_GLOBE		0x00400000	/* membar global expedited
						   registered */

#define	P2_LOGSIGEXIT_ENABLE	0x00800000	/* Enable logging on sigexit */
#define	P2_LOGSIGEXIT_CTL	0x01000000	/* Override kern.logsigexit */

#define	P2_HWT			0x02000000	/* Process is using HWT. */

/* Flags protected by proctree_lock, kept in p_treeflags. */
#define	P_TREE_ORPHANED		0x00000001	/* Reparented, on orphan list */
#define	P_TREE_FIRST_ORPHAN	0x00000002	/* First element of orphan
						   list */
#define	P_TREE_REAPER		0x00000004	/* Reaper of subtree */
#define	P_TREE_GRPEXITED	0x00000008	/* exit1() done with job ctl */

/*
 * These were process status values (p_stat), now they are only used in
 * legacy conversion code.
 */
#define	SIDL	1		/* Process being created by fork. */
#define	SRUN	2		/* Currently runnable. */
#define	SSLEEP	3		/* Sleeping on an address. */
#define	SSTOP	4		/* Process debugging or suspension. */
#define	SZOMB	5		/* Awaiting collection by parent. */
#define	SWAIT	6		/* Waiting for interrupt. */
#define	SLOCK	7		/* Blocked on a lock. */

#define	P_MAGIC		0xbeefface

#ifdef _KERNEL

/* Types and flags for mi_switch(9). */
#define	SW_TYPE_MASK		0xff	/* First 8 bits are switch type */
#define	SWT_OWEPREEMPT		1	/* Switching due to owepreempt. */
#define	SWT_TURNSTILE		2	/* Turnstile contention. */
#define	SWT_SLEEPQ		3	/* Sleepq wait. */
#define	SWT_RELINQUISH		4	/* yield call. */
#define	SWT_NEEDRESCHED		5	/* NEEDRESCHED was set. */
#define	SWT_IDLE		6	/* Switching from the idle thread. */
#define	SWT_IWAIT		7	/* Waiting for interrupts. */
#define	SWT_SUSPEND		8	/* Thread suspended. */
#define	SWT_REMOTEPREEMPT	9	/* Remote processor preempted. */
#define	SWT_REMOTEWAKEIDLE	10	/* Remote processor preempted idle. */
#define	SWT_BIND		11	/* Thread bound to a new CPU. */
#define	SWT_COUNT		12	/* Number of switch types. */
/* Flags */
#define	SW_VOL		0x0100		/* Voluntary switch. */
#define	SW_INVOL	0x0200		/* Involuntary switch. */
#define	SW_PREEMPT	0x0400		/* The invol switch is a preemption */

/* How values for thread_single(). */
#define	SINGLE_NO_EXIT	0
#define	SINGLE_EXIT	1
#define	SINGLE_BOUNDARY	2
#define	SINGLE_ALLPROC	3

#define	FOREACH_PROC_IN_SYSTEM(p)					\
	LIST_FOREACH((p), &allproc, p_list)
#define	FOREACH_THREAD_IN_PROC(p, td)					\
	TAILQ_FOREACH((td), &(p)->p_threads, td_plist)

#define	FIRST_THREAD_IN_PROC(p)	TAILQ_FIRST(&(p)->p_threads)

/*
 * We use process IDs <= pid_max <= PID_MAX; PID_MAX + 1 must also fit
 * in a pid_t, as it is used to represent "no process group".
 */
#define	PID_MAX		99999
#define	NO_PID		(PID_MAX + 1)
#define	THREAD0_TID	NO_PID
extern pid_t pid_max;

#define	SESS_LEADER(p)	((p)->p_session->s_leader == (p))

/* Lock and unlock a process. */
#define	PROC_LOCK(p)	mtx_lock(&(p)->p_mtx)
#define	PROC_TRYLOCK(p)	mtx_trylock(&(p)->p_mtx)
#define	PROC_UNLOCK(p)	mtx_unlock(&(p)->p_mtx)
#define	PROC_LOCKED(p)	mtx_owned(&(p)->p_mtx)
#define	PROC_WAIT_UNLOCKED(p)	mtx_wait_unlocked(&(p)->p_mtx)
#define	PROC_LOCK_ASSERT(p, type)	mtx_assert(&(p)->p_mtx, (type))

/* Lock and unlock a process group. */
#define	PGRP_LOCK(pg)	mtx_lock(&(pg)->pg_mtx)
#define	PGRP_UNLOCK(pg)	mtx_unlock(&(pg)->pg_mtx)
#define	PGRP_LOCKED(pg)	mtx_owned(&(pg)->pg_mtx)
#define	PGRP_LOCK_ASSERT(pg, type)	mtx_assert(&(pg)->pg_mtx, (type))

#define	PGRP_LOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_LOCK(pg);						\
} while (0)
#define	PGRP_UNLOCK_PGSIGNAL(pg) do {					\
	if ((pg) != NULL)						\
		PGRP_UNLOCK(pg);					\
} while (0)

/* Lock and unlock a session. */
#define	SESS_LOCK(s)	mtx_lock(&(s)->s_mtx)
#define	SESS_UNLOCK(s)	mtx_unlock(&(s)->s_mtx)
#define	SESS_LOCKED(s)	mtx_owned(&(s)->s_mtx)
#define	SESS_LOCK_ASSERT(s, type)	mtx_assert(&(s)->s_mtx, (type))

/*
 * A non-zero p_lock prevents the process from exiting; the exiting thread
 * sleeps in exit1() until the count reaches zero.
 *
 * PHOLD() asserts that the process (except the current process) is
 * not exiting and increments p_lock.
 * _PHOLD() is the same as PHOLD(); it takes the process already locked.
 */
#define	PHOLD(p) do {							\
	PROC_LOCK(p);							\
	_PHOLD(p);							\
	PROC_UNLOCK(p);							\
} while (0)
#define	_PHOLD(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	KASSERT(!((p)->p_flag & P_WEXIT) || (p) == curproc,		\
	    ("PHOLD of exiting process %p", p));			\
	(p)->p_lock++;							\
} while (0)
#define	PROC_ASSERT_HELD(p) do {					\
	KASSERT((p)->p_lock > 0, ("process %p not held", p));		\
} while (0)

#define	PRELE(p) do {							\
	PROC_LOCK((p));							\
	_PRELE((p));							\
	PROC_UNLOCK((p));						\
} while (0)
#define	_PRELE(p) do {							\
	PROC_LOCK_ASSERT((p), MA_OWNED);				\
	PROC_ASSERT_HELD(p);						\
	(--(p)->p_lock);						\
	if (((p)->p_flag & P_WEXIT) && (p)->p_lock == 0)		\
		wakeup(&(p)->p_lock);					\
} while (0)
#define	PROC_ASSERT_NOT_HELD(p) do {					\
	KASSERT((p)->p_lock == 0, ("process %p held", p));		\
} while (0)
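
/*
 * Illustrative sketch of the hold/release pattern described above: take a
 * hold so the process cannot finish exiting while its proc mutex is
 * dropped, then release the hold when done.  The function name below is
 * hypothetical; pfind() (which returns the process locked, or NULL),
 * _PHOLD(), PROC_UNLOCK() and PRELE() are the interfaces declared in this
 * file.
 *
 *	int
 *	example_visit(pid_t pid)
 *	{
 *		struct proc *p;
 *
 *		p = pfind(pid);		// returns with PROC_LOCK(p) held
 *		if (p == NULL)
 *			return (ESRCH);
 *		_PHOLD(p);		// keep exit1() from completing
 *		PROC_UNLOCK(p);
 *		// ... work that may sleep ...
 *		PRELE(p);		// relock, drop the hold, unlock
 *		return (0);
 *	}
 */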

#define	PROC_UPDATE_COW(p) do {						\
	struct proc *_p = (p);						\
	PROC_LOCK_ASSERT((_p), MA_OWNED);				\
	atomic_store_int(&_p->p_cowgen, _p->p_cowgen + 1);		\
} while (0)

#define	PROC_COW_CHANGECOUNT(td, p) ({					\
	struct thread *_td = (td);					\
	struct proc *_p = (p);						\
	MPASS(_td == curthread);					\
	PROC_LOCK_ASSERT(_p, MA_OWNED);					\
	_p->p_cowgen - _td->td_cowgen;					\
})

/* Control whether or not it is safe for curthread to sleep. */
#define	THREAD_NO_SLEEPING() do {					\
	curthread->td_no_sleeping++;					\
	MPASS(curthread->td_no_sleeping > 0);				\
} while (0)

#define	THREAD_SLEEPING_OK() do {					\
	MPASS(curthread->td_no_sleeping > 0);				\
	curthread->td_no_sleeping--;					\
} while (0)

#define	THREAD_CAN_SLEEP()	((curthread)->td_no_sleeping == 0)

#define	THREAD_CONTENDS_ON_LOCK(lo) do {				\
	MPASS(curthread->td_wantedlock == NULL);			\
	curthread->td_wantedlock = lo;					\
} while (0)

#define	THREAD_CONTENTION_DONE(lo) do {					\
	MPASS(curthread->td_wantedlock == lo);				\
	curthread->td_wantedlock = NULL;				\
} while (0)

#define	PIDHASH(pid)	(&pidhashtbl[(pid) & pidhash])
#define	PIDHASHLOCK(pid) (&pidhashtbl_lock[((pid) & pidhashlock)])
extern LIST_HEAD(pidhashhead, proc) *pidhashtbl;
extern struct sx *pidhashtbl_lock;
extern u_long pidhash;
extern u_long pidhashlock;

#define	PGRPHASH(pgid)	(&pgrphashtbl[(pgid) & pgrphash])
extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;

extern struct sx allproc_lock;
extern int allproc_gen;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
extern struct mtx procid_lock;
extern struct proc proc0;		/* Process slot for swapper. */
extern struct thread0_storage thread0_st; /* Primary thread in proc0. */
#define	thread0 (thread0_st.t0st_thread)
extern struct vmspace vmspace0;		/* VM space for proc0. */
extern int hogticks;			/* Limit on kernel cpu hogs. */
extern int lastpid;
extern int nprocs, maxproc;		/* Current and max number of procs. */
extern int maxprocperuid;		/* Max procs per uid. */
extern u_long ps_arg_cache_limit;

LIST_HEAD(proclist, proc);
TAILQ_HEAD(procqueue, proc);
TAILQ_HEAD(threadqueue, thread);
extern struct proclist allproc;		/* List of all processes. */
extern struct proc *initproc, *pageproc; /* Process slots for init, pager. */

extern struct uma_zone *proc_zone;
extern struct uma_zone *pgrp_zone;

struct	proc *pfind(pid_t);		/* Find process by id. */
struct	proc *pfind_any(pid_t);		/* Find (zombie) process by id. */
struct	proc *pfind_any_locked(pid_t pid); /* Find process by id, locked. */
struct	pgrp *pgfind(pid_t);		/* Find process group by id. */
void	pidhash_slockall(void);		/* Shared lock all pid hash lists. */
void	pidhash_sunlockall(void);	/* Shared unlock all pid hash lists. */

struct	fork_req {
	int		fr_flags;
	int		fr_pages;
	int		*fr_pidp;
	struct proc	**fr_procp;
	int		*fr_pd_fd;
	int		fr_pd_flags;
	struct filecaps	*fr_pd_fcaps;
	int		fr_flags2;
#define	FR2_DROPSIG_CAUGHT	0x00000001 /* Drop caught non-DFL signals */
#define	FR2_SHARE_PATHS		0x00000002 /* Invert sense of RFFDG for paths */
#define	FR2_KPROC		0x00000004 /* Create a kernel process */
};

/*
 * pget() flags.
 */
#define	PGET_HOLD	0x00001	/* Hold the process. */
#define	PGET_CANSEE	0x00002	/* Check against p_cansee(). */
#define	PGET_CANDEBUG	0x00004	/* Check against p_candebug(). */
#define	PGET_ISCURRENT	0x00008	/* Check that the found process is current. */
#define	PGET_NOTWEXIT	0x00010	/* Check that the process is not in P_WEXIT. */
#define	PGET_NOTINEXEC	0x00020	/* Check that the process is not in P_INEXEC. */
#define	PGET_NOTID	0x00040	/* Do not assume tid if pid > PID_MAX. */

#define	PGET_WANTREAD	(PGET_HOLD | PGET_CANDEBUG | PGET_NOTWEXIT)

int	pget(pid_t pid, int flags, struct proc **pp);

/* ast_register() flags */
#define	ASTR_ASTF_REQUIRED	0x0001	/* td_ast TDAI(TDA_X) flag set is
					   required for call */
#define	ASTR_TDP		0x0002	/* td_pflags flag set is required */
#define	ASTR_KCLEAR		0x0004	/* call me on ast_kclear() */
#define	ASTR_UNCOND		0x0008	/* call me always */

void	ast(struct trapframe *framep);
void	ast_kclear(struct thread *td);
void	ast_register(int ast, int ast_flags, int tdp,
	    void (*f)(struct thread *td, int asts));
void	ast_deregister(int tda);
void	ast_sched_locked(struct thread *td, int tda);
void	ast_sched_mask(struct thread *td, int ast);
void	ast_sched(struct thread *td, int tda);
void	ast_unsched_locked(struct thread *td, int tda);

struct	thread *choosethread(void);
int	cr_bsd_visible(struct ucred *u1, struct ucred *u2);
int	cr_cansee(struct ucred *u1, struct ucred *u2);
int	cr_canseesocket(struct ucred *cred, struct socket *so);
int	cr_cansignal(struct ucred *cred, struct proc *proc, int signum);
int	enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
	    struct session *sess);
int	enterthispgrp(struct proc *p, struct pgrp *pgrp);
int	fork1(struct thread *, struct fork_req *);
void	fork_exit(void (*)(void *, struct trapframe *), void *,
	    struct trapframe *);
void	fork_return(struct thread *, struct trapframe *);
int	inferior(struct proc *p);
void	itimer_proc_continue(struct proc *p);
void	kqtimer_proc_continue(struct proc *p);
void	kern_proc_vmmap_resident(struct vm_map *map, struct vm_map_entry *entry,
	    int *resident_count, bool *super);
void	kern_yield(int);
void	killjobc(void);
int	leavepgrp(struct proc *p);
int	maybe_preempt(struct thread *td);
void	maybe_yield(void);
void	mi_switch(int flags);
int	p_candebug(struct thread *td, struct proc *p);
int	p_cansee(struct thread *td, struct proc *p);
int	p_cansched(struct thread *td, struct proc *p);
int	p_cansignal(struct thread *td, struct proc *p, int signum);
int	p_canwait(struct thread *td, struct proc *p);
struct	pargs *pargs_alloc(int len);
void	pargs_drop(struct pargs *pa);
void	pargs_hold(struct pargs *pa);
int	pgrp_calc_jobc(struct pgrp *pgrp);
void	proc_add_orphan(struct proc *child, struct proc *parent);
int	proc_get_binpath(struct proc *p, char *binname, char **fullpath,
	    char **freepath);
int	proc_getargv(struct thread *td, struct proc *p, struct sbuf *sb);
int	proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb);
int	proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb);
void	procinit(void);
int	proc_iterate(int (*cb)(struct proc *, void *), void *cbarg);
void	proc_linkup0(struct proc *p, struct thread *td);
void	proc_linkup(struct proc *p, struct thread *td);
struct	proc *proc_realparent(struct proc *child);
void	proc_reap(struct thread *td, struct proc *p, int *status, int options);
void	proc_reparent(struct proc *child, struct proc *newparent, bool set_oppid);
void	proc_set_p2_wexit(struct proc *p);
void	proc_set_traced(struct proc *p, bool stop);
void	proc_wkilled(struct proc *p);
struct	pstats *pstats_alloc(void);
void	pstats_fork(struct pstats *src, struct pstats *dst);
void	pstats_free(struct pstats *ps);
void	proc_clear_orphan(struct proc *p);
void	reaper_abandon_children(struct proc *p, bool exiting);
int	securelevel_ge(struct ucred *cr, int level);
int	securelevel_gt(struct ucred *cr, int level);
void	sess_hold(struct session *);
void	sess_release(struct session *);
void	setrunnable(struct thread *, int);
void	setsugid(struct proc *p);
bool	should_yield(void);
int	sigonstack(size_t sp);
void	stopevent(struct proc *, u_int, u_int);
struct	thread *tdfind(lwpid_t, pid_t);
void	threadinit(void);
void	tidhash_add(struct thread *);
void	tidhash_remove(struct thread *);
void	cpu_idle(int);
int	cpu_idle_wakeup(int);
extern	void (*cpu_idle_hook)(sbintime_t);	/* Hook to machdep CPU idler. */
void	cpu_switch(struct thread *, struct thread *, struct mtx *);
void	cpu_sync_core(void);
void	cpu_throw(struct thread *, struct thread *) __dead2;
void	cpu_update_pcb(struct thread *);
bool	curproc_sigkilled(void);
void	userret(struct thread *, struct trapframe *);

void	cpu_exit(struct thread *);
void	exit1(struct thread *, int, int) __dead2;
void	cpu_copy_thread(struct thread *td, struct thread *td0);
bool	cpu_exec_vmspace_reuse(struct proc *p, struct vm_map *map);
int	cpu_fetch_syscall_args(struct thread *td);
void	cpu_fork(struct thread *, struct proc *, struct thread *, int);
void	cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);
int	cpu_procctl(struct thread *td, int idtype, id_t id, int com,
	    void *data);
void	cpu_set_syscall_retval(struct thread *, int);
int	cpu_set_upcall(struct thread *, void (*)(void *), void *,
	    stack_t *);
int	cpu_set_user_tls(struct thread *, void *tls_base, int flags);
void	cpu_thread_alloc(struct thread *);
void	cpu_thread_clean(struct thread *);
void	cpu_thread_exit(struct thread *);
void	cpu_thread_free(struct thread *);
struct	thread *thread_alloc(int pages);
int	thread_check_susp(struct thread *td, bool sleep);
void	thread_cow_get_proc(struct thread *newtd, struct proc *p);
void	thread_cow_get(struct thread *newtd, struct thread *td);
void	thread_cow_free(struct thread *td);
void	thread_cow_update(struct thread *td);
void	thread_cow_synced(struct thread *td);
int	thread_create(struct thread *td, struct rtprio *rtp,
	    int (*initialize_thread)(struct thread *, void *), void *thunk);
void	thread_exit(void) __dead2;
void	thread_free(struct thread *td);
void	thread_link(struct thread *td, struct proc *p);
void	thread_reap_barrier(void);
int	thread_recycle(struct thread *, int pages);
int	thread_single(struct proc *p, int how);
void	thread_single_end(struct proc *p, int how);
void	thread_stash(struct thread *td);
void	thread_stopped(struct proc *p);
void	childproc_stopped(struct proc *child, int reason);
void	childproc_continued(struct proc *child);
void	childproc_exited(struct proc *child);
void	thread_run_flash(struct thread *td);
int	thread_suspend_check(int how);
bool	thread_suspend_check_needed(void);
void	thread_suspend_switch(struct thread *, struct proc *p);
void	thread_suspend_one(struct thread *td);
void	thread_unlink(struct thread *td);
void	thread_unsuspend(struct proc *p);
void	thread_wait(struct proc *p);

bool	stop_all_proc_block(void);
void	stop_all_proc_unblock(void);
void	stop_all_proc(void);
void	resume_all_proc(void);

static __inline int
curthread_pflags_set(int flags)
{
	struct thread *td;
	int save;

	td = curthread;
	save = ~flags | (td->td_pflags & flags);
	td->td_pflags |= flags;
	return (save);
}

static __inline void
curthread_pflags_restore(int save)
{

	curthread->td_pflags &= save;
}

static __inline int
curthread_pflags2_set(int flags)
{
	struct thread *td;
	int save;

	td = curthread;
	save = ~flags | (td->td_pflags2 & flags);
	td->td_pflags2 |= flags;
	return (save);
}

static __inline void
curthread_pflags2_restore(int save)
{

	curthread->td_pflags2 &= save;
}
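
/*
 * Illustrative sketch of how the set/restore helpers above are meant to be
 * paired: a flag that was already set by an outer caller remains set after
 * the restore, so nested sections compose.  The function name below is
 * hypothetical; TDP_NOFAULTING is one of the TDP_* flags defined earlier
 * in this file.
 *
 *	static void
 *	example_nofault_section(void)
 *	{
 *		int save;
 *
 *		save = curthread_pflags_set(TDP_NOFAULTING);
 *		// ... code that must not handle page faults ...
 *		curthread_pflags_restore(save);
 *	}
 */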

static __inline __pure2 struct td_sched *
td_get_sched(struct thread *td)
{

	return ((struct td_sched *)&td[1]);
}

#define	PROC_ID_PID	0
#define	PROC_ID_GROUP	1
#define	PROC_ID_SESSION	2
#define	PROC_ID_REAP	3

void	proc_id_set(int type, pid_t id);
void	proc_id_set_cond(int type, pid_t id);
void	proc_id_clear(int type, pid_t id);

EVENTHANDLER_LIST_DECLARE(process_ctor);
EVENTHANDLER_LIST_DECLARE(process_dtor);
EVENTHANDLER_LIST_DECLARE(process_init);
EVENTHANDLER_LIST_DECLARE(process_fini);
EVENTHANDLER_LIST_DECLARE(process_exit);
EVENTHANDLER_LIST_DECLARE(process_fork);
EVENTHANDLER_LIST_DECLARE(process_exec);

EVENTHANDLER_LIST_DECLARE(thread_ctor);
EVENTHANDLER_LIST_DECLARE(thread_dtor);
EVENTHANDLER_LIST_DECLARE(thread_init);

#endif /* _KERNEL */

#endif /* !_SYS_PROC_H_ */