/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t		siglock;
	refcount_t		count;
	wait_queue_head_t	signalfd_wqh;
	struct k_sigaction	action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	u64			ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};
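/*
 * Illustrative sketch (not part of this header): because the fields of
 * struct task_cputime_atomic are atomic64_t, per-thread accounting can
 * accumulate into the shared thread-group totals without taking a lock,
 * roughly in the way the group accounting helpers do. The helper name
 * below is hypothetical:
 *
 *	static void account_group_utime_sketch(struct thread_group_cputimer *ct,
 *					       u64 cputime)
 *	{
 *		atomic64_add(cputime, &ct->cputime_atomic.utime);
 *	}
 */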
/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	int			quick_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head	multiprocess;

	/* thread group exit support */
	int			group_exit_code;
	/* notify group_exec_task when notify_count is less than or equal to 0 */
	int			notify_count;
	struct task_struct	*group_exec_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags;	/* see SIGNAL_* flags below */

	struct core_state	*core_state;	/* coredumping support */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer		real_timer;
	ktime_t			it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer	it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace).
					 * Deprecated; do not use in new code.
					 * Use exec_update_lock instead.
					 */
	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
						 * being updated during exec,
						 * and may have inconsistent
						 * permissions.
						 */
} __randomize_layout;
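/*
 * Illustrative sketch (not part of this header), assuming a task pointer
 * obtained and pinned elsewhere: readers that need a stable view of another
 * task's exec-time state take exec_update_lock for read across the accesses,
 * so a concurrent exec cannot change credentials/mm underneath them:
 *
 *	down_read(&task->signal->exec_update_lock);
 *	... inspect state that exec would otherwise change under us ...
 *	up_read(&task->signal->exec_update_lock);
 */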
/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
			  kernel_siginfo_t *info, enum pid_type *type);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	enum pid_type __type;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(task, &task->blocked, &__info, &__type);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
		current->jobctl |= JOBCTL_STOPPED;
		set_special_state(TASK_STOPPED);
	}
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
int send_sig_perf(void __user *addr, u32 type, u64 sig_data);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void clear_notify_signal(void)
{
	clear_thread_flag(TIF_NOTIFY_SIGNAL);
	smp_mb__after_atomic();
}

/*
 * Returns 'true' if kick_process() is needed to force a transition from
 * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
 */
static inline bool __set_notify_signal(struct task_struct *task)
{
	return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
	       !wake_up_state(task, TASK_INTERRUPTIBLE);
}

/*
 * Called to break out of interruptible wait loops, and enter the
 * exit_to_user_mode_loop().
 */
static inline void set_notify_signal(struct task_struct *task)
{
	if (__set_notify_signal(task))
		kick_process(task);
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int task_sigpending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
	/*
	 * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
	 * behavior in terms of ensuring that we break out of wait loops
	 * so that notify signal callbacks can be processed.
	 */
	if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
		return 1;
	return task_sigpending(p);
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return task_sigpending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
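/*
 * Illustrative sketch (not part of this header): a typical killable wait
 * loop uses fatal_signal_pending() so that only SIGKILL interrupts it,
 * matching the TASK_KILLABLE policy encoded in signal_pending_state().
 * Here 'condition' stands for whatever the caller is waiting on:
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (condition)
 *			break;
 *		if (fatal_signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */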
/*
 * This should only be used in fault handlers to decide whether we
 * should stop the current fault routine to handle the signals
 * instead, especially when the fault has been interrupted and
 * VM_FAULT_RETRY is set.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}
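/*
 * Illustrative sketch (not part of this header): an arch page-fault handler
 * typically checks this right after handle_mm_fault() and bails out so the
 * signal can be delivered (or the task killed) before the fault is retried.
 * The no_context label is a hypothetical kernel-fault exit path:
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault_signal_pending(fault, regs)) {
 *		if (!user_mode(regs))
 *			goto no_context;
 *		return;
 *	}
 */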
/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
	unsigned int state = 0;

	if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
		t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
		state = TASK_WAKEKILL | __TASK_TRACED;
	}
	signal_wake_up_state(t, state);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	unsigned int state = 0;

	if (resume) {
		t->jobctl &= ~JOBCTL_TRACED;
		state = __TASK_TRACED;
	}
	signal_wake_up_state(t, state);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}

static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}

static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}

static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!signal_pending(current));
	else
		restore_saved_sigmask();
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;

	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}
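/*
 * Illustrative sketch (not part of this header): syscalls that take a
 * temporary signal mask (ppoll(), pselect6(), epoll_pwait(), ...) install
 * it with set_user_sigmask(), which stashes the old mask in
 * current->saved_sigmask, and undo it on the way out. The original mask
 * is restored immediately unless the call was interrupted, in which case
 * restoration is deferred until after the signal has been delivered.
 * do_the_wait() is a hypothetical worker:
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_the_wait(...);
 *
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */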
static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

	return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);
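/*
 * Illustrative sketch (not part of this header): the iteration macros above
 * walk RCU-protected lists, so a typical read-side user holds rcu_read_lock()
 * (or tasklist_lock) around the whole walk and, because it is a double loop,
 * leaves it with goto rather than break. some_condition() is a hypothetical
 * predicate:
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t) {
 *		if (some_condition(t))
 *			goto out;
 *	}
 * out:
 *	rcu_read_unlock();
 */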
static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern bool thread_group_exited(struct pid *pid);

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
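/*
 * Illustrative sketch (not part of this header): lock_task_sighand() returns
 * NULL if the task is already dead and its sighand has been detached, so
 * callers must check the result before touching task->signal or
 * task->sighand under the lock:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		... task->signal and task->sighand are stable here ...
 *		unlock_task_sighand(task, &flags);
 *	}
 */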
#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
#else
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
#endif

static inline unsigned long task_rlimit(const struct task_struct *task,
					unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
					    unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

#endif /* _LINUX_SCHED_SIGNAL_H */