/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t siglock;
	refcount_t count;
	wait_queue_head_t signalfd_wqh;
	struct k_sigaction action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int ac_flag;
	long ac_exitcode;
	unsigned long ac_mem;
	u64 ac_utime, ac_stime;
	unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic: atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t sigcnt;
	atomic_t live;
	int nr_threads;
	int quick_threads;
	struct list_head thread_head;

	wait_queue_head_t wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct *curr_target;

	/* shared signal handling: */
	struct sigpending shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head multiprocess;

	/* thread group exit support */
	int group_exit_code;
	/* notify group_exec_task when notify_count is less or equal to 0 */
	int notify_count;
	struct task_struct *group_exec_task;

	/* thread group stop support, overloads group_exit_code too */
	int group_stop_count;
	unsigned int flags;	/* see SIGNAL_* flags below */

	struct core_state *core_state;	/* coredumping support */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int is_child_subreaper:1;
	unsigned int has_child_subreaper:1;
	unsigned int autoreap:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	unsigned int timer_create_restore_ids:1;
	atomic_t next_posix_timer_id;
	struct hlist_head posix_timers;
	struct hlist_head ignored_posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty;	/* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

#ifdef CONFIG_CGROUPS
	struct rw_semaphore cgroup_threadgroup_rwsem;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace).
					 * Deprecated: do not use in new code.
					 * Use exec_update_lock instead.
					 */
	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
						 * being updated during exec,
						 * and may have inconsistent
						 * permissions.
						 */
} __randomize_layout;

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	enum pid_type __type;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(&task->blocked, &__info, &__type);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
		current->jobctl |= JOBCTL_STOPPED;
		set_special_state(TASK_STOPPED);
	}
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

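/*
 * Illustrative sketch (not part of this header's API): a kernel thread
 * that has unblocked a signal, e.g. via allow_signal(), might drain
 * pending signals and honor a stop request roughly like this. The work
 * loop and the do_work() helper are hypothetical and only tie
 * kernel_dequeue_signal()/kernel_signal_stop() together:
 *
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current) &&
 *		    kernel_dequeue_signal() == SIGSTOP)
 *			kernel_signal_stop();
 *		do_work();
 *	}
 */
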
int force_sig_fault_to_task(int sig, int code, void __user *addr,
			    struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr);
int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
int send_sig_perf(void __user *addr, u32 type, u64 sig_data);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void clear_notify_signal(void)
{
	clear_thread_flag(TIF_NOTIFY_SIGNAL);
	smp_mb__after_atomic();
}

/*
 * Returns 'true' if kick_process() is needed to force a transition from
 * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
 */
static inline bool __set_notify_signal(struct task_struct *task)
{
	return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
	       !wake_up_state(task, TASK_INTERRUPTIBLE);
}

/*
 * Called to break out of interruptible wait loops, and enter the
 * exit_to_user_mode_loop().
 */
static inline void set_notify_signal(struct task_struct *task)
{
	if (__set_notify_signal(task))
		kick_process(task);
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int task_sigpending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
	/*
	 * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
	 * behavior in terms of ensuring that we break out of wait loops
	 * so that notify signal callbacks can be processed.
	 */
	if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
		return 1;
	return task_sigpending(p);
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return task_sigpending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * This should only be used in fault handlers to decide whether we
 * should stop the current fault routine to handle the signals
 * instead, especially in the case where we've been interrupted by
 * a VM_FAULT_RETRY.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}

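/*
 * Illustrative sketch (assumption, not taken from this header): an
 * architecture page-fault handler typically checks
 * fault_signal_pending() right after handle_mm_fault() and returns so
 * the pending signal can be handled before the fault is retried. The
 * no_context label stands in for the arch's kernel-fault path:
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault_signal_pending(fault, regs)) {
 *		if (!user_mode(regs))
 *			goto no_context;
 *		return;
 *	}
 */
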
/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

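/*
 * Illustrative sketch (assumption): a caller that changes
 * current->blocked must re-evaluate TIF_SIGPENDING under the siglock,
 * roughly what set_current_blocked()/__set_current_blocked() do in
 * kernel/signal.c:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = *newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */
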
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
	unsigned int state = 0;
	if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
		t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
		state = TASK_WAKEKILL | __TASK_TRACED;
	}
	signal_wake_up_state(t, state);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	unsigned int state = 0;
	if (resume) {
		t->jobctl &= ~JOBCTL_TRACED;
		state = __TASK_TRACED;
	}
	signal_wake_up_state(t, state);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!signal_pending(current));
	else
		restore_saved_sigmask();
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO	((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

	return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

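/*
 * Illustrative sketch (assumption): architecture signal-delivery code
 * usually saves the mask returned by sigmask_to_save() into the
 * user-space signal frame, and restore_saved_sigmask() is called when
 * returning to user mode without delivering a signal, so that a mask
 * temporarily installed by ppoll()/pselect()/epoll_pwait() is put back.
 * The setup_rt_frame() helper and the stepping flag below are per-arch
 * stand-ins:
 *
 *	sigset_t *oldset = sigmask_to_save();
 *	int failed = setup_rt_frame(ksig, oldset, regs);
 *
 *	signal_setup_done(failed, ksig, stepping);
 */
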
static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Without tasklist/siglock it is only rcu-safe if g can't exit/exec,
 * otherwise next_thread(t) will never reach g after list_del_rcu(g).
 */
#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define for_other_threads(p, t)	\
	for (t = p; (t = next_thread(t)) != p; )

#define __for_each_thread(signal, t)	\
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
				lockdep_is_held(&tasklist_lock))

#define for_each_thread(p, t)	\
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

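/*
 * Illustrative sketch (assumption): __for_each_thread() walks an RCU
 * list, so callers that do not hold tasklist_lock are expected to wrap
 * the walk in rcu_read_lock(). The per-thread inspect_thread() helper
 * is hypothetical:
 *
 *	rcu_read_lock();
 *	for_each_thread(p, t)
 *		inspect_thread(t);
 *	rcu_read_unlock();
 */
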
/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

/*
 * returns NULL if p is the last thread in the thread group
 */
static inline struct task_struct *__next_thread(struct task_struct *p)
{
	return list_next_or_null_rcu(&p->signal->thread_head,
				     &p->thread_node,
				     struct task_struct,
				     thread_node);
}

static inline struct task_struct *next_thread(struct task_struct *p)
{
	return __next_thread(p) ?: p->group_leader;
}

static inline int thread_group_empty(struct task_struct *p)
{
	return thread_group_leader(p) &&
	       list_is_last(&p->thread_node, &p->signal->thread_head);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
						unsigned long *flags)
	__cond_acquires(nonnull, &task->sighand->siglock);

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
	__releases(&task->sighand->siglock)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}

#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
#else
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
#endif

static inline unsigned long task_rlimit(const struct task_struct *task,
					unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
					    unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

#endif /* _LINUX_SCHED_SIGNAL_H */