/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kmsan_types.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
#include <asm/kmap_size.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct bpf_run_ctx;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct user_event_mm;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x00000000
#define TASK_INTERRUPTIBLE		0x00000001
#define TASK_UNINTERRUPTIBLE		0x00000002
#define __TASK_STOPPED			0x00000004
#define __TASK_TRACED			0x00000008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x00000010
#define EXIT_ZOMBIE			0x00000020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED			0x00000040
#define TASK_DEAD			0x00000080
#define TASK_WAKEKILL			0x00000100
#define TASK_WAKING			0x00000200
#define TASK_NOLOAD			0x00000400
#define TASK_NEW			0x00000800
#define TASK_RTLOCK_WAIT		0x00001000
#define TASK_FREEZABLE			0x00002000
#define __TASK_FREEZABLE_UNSAFE		(0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
#define TASK_FROZEN			0x00008000
#define TASK_STATE_MAX			0x00010000

#define TASK_ANY			(TASK_STATE_MAX-1)

/*
 * DO NOT ADD ANY NEW USERS !
 */
#define TASK_FREEZABLE_UNSAFE		(TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			__TASK_TRACED

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_running(task)		(READ_ONCE((task)->__state) == TASK_RUNNING)

#define task_is_traced(task)		((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
#define task_is_stopped(task)		((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)

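/*
 * Illustrative sketch (not part of this header's API surface): TASK_KILLABLE
 * sleeps like TASK_UNINTERRUPTIBLE except that fatal signals may end the
 * sleep, so callers typically recheck for them in the wait loop:
 *
 *   set_current_state(TASK_KILLABLE);
 *   while (!CONDITION) {
 *	if (fatal_signal_pending(current))
 *		return -ERESTARTSYS;
 *	schedule();
 *	set_current_state(TASK_KILLABLE);
 *   }
 *   __set_current_state(TASK_RUNNING);
 */
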
/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state)				\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value)				\
	do {								\
		WARN_ON_ONCE(is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
	} while (0)

# define debug_special_state_change(state_value)			\
	do {								\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
	} while (0)

# define debug_rtlock_wait_set_state()					 \
	do {								 \
		current->saved_state_change = current->task_state_change;\
		current->task_state_change = _THIS_IP_;			 \
	} while (0)

# define debug_rtlock_wait_restore_state()				 \
	do {								 \
		current->task_state_change = current->saved_state_change;\
	} while (0)

#else
# define debug_normal_state_change(cond)	do { } while (0)
# define debug_special_state_change(cond)	do { } while (0)
# define debug_rtlock_wait_set_state()		do { } while (0)
# define debug_rtlock_wait_restore_state()	do { } while (0)
#endif

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (CONDITION)
 *	   break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * CONDITION test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   CONDITION = 1;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
 * accessing p->state.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
 * a problem either because that will result in one extra go around the loop
 * and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)				\
	do {								\
		debug_normal_state_change((state_value));		\
		WRITE_ONCE(current->__state, (state_value));		\
	} while (0)

#define set_current_state(state_value)					\
	do {								\
		debug_normal_state_change((state_value));		\
		smp_store_mb(current->__state, (state_value));		\
	} while (0)

/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
									\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		debug_special_state_change((state_value));		\
		WRITE_ONCE(current->__state, (state_value));		\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

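/*
 * For illustration only: the stop/ptrace paths are the typical users of
 * set_special_state(). E.g. do_signal_stop() moves a task to stopped state
 * under siglock roughly like:
 *
 *   set_special_state(TASK_STOPPED);
 *
 * rather than set_current_state(), because there is no surrounding
 * condition-recheck loop there to absorb a racing TASK_RUNNING store.
 */
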
/*
 * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
 *
 * RT's spin/rwlock substitutions are state preserving. The state of the
 * task when blocking on the lock is saved in task_struct::saved_state and
 * restored after the lock has been acquired.  These operations are
 * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
 * lock related wakeups while the task is blocked on the lock are
 * redirected to operate on task_struct::saved_state to ensure that these
 * are not dropped. On restore task_struct::saved_state is set to
 * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
 *
 * The lock operation looks like this:
 *
 *	current_save_and_set_rtlock_wait_state();
 *	for (;;) {
 *		if (try_lock())
 *			break;
 *		raw_spin_unlock_irq(&lock->wait_lock);
 *		schedule_rtlock();
 *		raw_spin_lock_irq(&lock->wait_lock);
 *		set_current_state(TASK_RTLOCK_WAIT);
 *	}
 *	current_restore_rtlock_saved_state();
 */
#define current_save_and_set_rtlock_wait_state()			\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		current->saved_state = current->__state;		\
		debug_rtlock_wait_set_state();				\
		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);

#define current_restore_rtlock_saved_state()				\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		debug_rtlock_wait_restore_state();			\
		WRITE_ONCE(current->__state, current->saved_state);	\
		current->saved_state = TASK_RUNNING;			\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);

#define get_current_state()	READ_ONCE(current->__state)

/*
 * Define the task command name length as an enum so that it is
 * visible to BPF programs.
 */
enum {
	TASK_COMM_LEN = 16,
};

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
#ifdef CONFIG_PREEMPT_RT
 extern void schedule_rtlock(void);
#endif

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

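/*
 * Illustrative sketch: schedule_timeout() expects the caller to have set the
 * task state first, and returns the time remaining in jiffies (0 on full
 * expiry), e.g.:
 *
 *   set_current_state(TASK_INTERRUPTIBLE);
 *   long remaining = schedule_timeout(msecs_to_jiffies(100));
 *   if (remaining)
 *	; // woken early, e.g. by wake_up_process() or a signal
 *
 * The schedule_timeout_interruptible/killable/uninterruptible() wrappers
 * just set the corresponding state before calling schedule_timeout().
 */
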
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock:  protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task is idle */
	VTIME_IDLE,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs as a guest in a CPU with VTIME active: */
	VTIME_GUEST,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	unsigned int		cpu;
	u64			utime;
	u64			stime;
	u64			gtime;
};

/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

/* Increase resolution of cpu_capacity calculations */
# define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)

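/*
 * Worked example (illustrative): with SCHED_FIXEDPOINT_SHIFT == 10 the
 * fixed point "one" is 1 << 10 == 1024, so e.g. 25% of SCHED_CAPACITY_SCALE
 * is stored as 256, and multiplying two such fractional values needs a
 * correcting shift:
 *
 *   product = (a * b) >> SCHED_FIXEDPOINT_SHIFT;
 */
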
struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

/**
 * struct util_est - Estimated utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 *            utilization of a task
 *
 * Support data structure to track an Exponential Weighted Moving Average
 * (EWMA) of a FAIR task's utilization. New samples are added to the moving
 * average each time a task completes an activation. The sample weight is
 * chosen so that the EWMA will be relatively insensitive to transient changes
 * to the task's workload.
 *
 * The enqueued attribute has a slightly different meaning for tasks and cpus:
 * - task:   the task's util_avg at last task dequeue time
 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
 * Thus, the util_est.enqueued of a task represents the contribution on the
 * estimated utilization of the CPU where that task is currently enqueued.
 *
 * Only for tasks we track a moving average of the past instantaneous
 * estimated utilization. This allows us to absorb sporadic drops in
 * utilization of an otherwise almost periodic task.
 *
 * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
 * updates. When a task is dequeued, its util_est should not be updated if its
 * util_avg has not been updated in the meantime.
 * This information is mapped into the MSB bit of util_est.enqueued at dequeue
 * time. Since the max value of util_est.enqueued for a task is 1024 (PELT
 * util_avg for a task) it is safe to use the MSB.
 */
struct util_est {
	unsigned int			enqueued;
	unsigned int			ewma;
#define UTIL_EST_WEIGHT_SHIFT		2
#define UTIL_AVG_UNCHANGED		0x80000000
} __attribute__((__aligned__(sizeof(u64))));

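/*
 * Illustrative sketch of the EWMA update (see util_est_update() in
 * kernel/sched/fair.c for the real thing, which also handles rounding):
 * with UTIL_EST_WEIGHT_SHIFT == 2 each new sample contributes 1/4 of its
 * weight, i.e. roughly
 *
 *   ewma += (last_enqueued - ewma) >> UTIL_EST_WEIGHT_SHIFT;
 */
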
/*
 * The load/runnable/util_avg accumulates an infinite geometric series
 * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * [runnable_avg definition]
 *
 *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where runnable% is the time ratio that a sched_entity is runnable and
 * running% the time ratio that a sched_entity is running.
 *
 * For cfs_rq, they are the aggregated values of all runnable and blocked
 * sched_entities.
 *
 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
 * capacity scaling. The scaling is done through the rq_clock_pelt that is used
 * for computing those signals (see update_rq_clock_pelt())
 *
 * N.B., the above ratios (runnable% and running%) themselves are in the
 * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
 * to as large a range as necessary. This is for example reflected by
 * util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *    Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u64				runnable_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			runnable_avg;
	unsigned long			util_avg;
	struct util_est			util_est;
} ____cacheline_aligned;

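/*
 * For intuition (illustrative numbers): with SCHED_CAPACITY_SCALE == 1024,
 * a task that has recently been running about half of the time on a
 * full-capacity CPU converges towards util_avg ~= 512, while a task that is
 * always running saturates at ~1024.
 */
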
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	s64				sum_block_runtime;

	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;

#ifdef CONFIG_SCHED_CORE
	u64				core_forceidle_sum;
#endif
#endif /* CONFIG_SCHEDSTATS */
} ____cacheline_aligned;

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
	/* cached value of my_q->h_nr_running */
	unsigned long			runnable_weight;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period		*/
	u64				dl_density;	/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */
	unsigned int			dl_throttled      : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;

#ifdef CONFIG_RT_MUTEXES
	/*
	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
	 * pi_se points to the donor, otherwise points to the dl_se it belongs
	 * to (the original one/itself).
	 */
	struct sched_dl_entity *pi_se;
#endif
};

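/*
 * Illustrative numbers: a task declaring, via sched_setattr(), a runtime of
 * 10ms every 100ms with a 50ms relative deadline would end up with
 * dl_runtime = 10 * NSEC_PER_MSEC, dl_deadline = 50 * NSEC_PER_MSEC and
 * dl_period = 100 * NSEC_PER_MSEC; dl_bw then encodes the resulting 10%
 * bandwidth ratio in fixed point.
 */
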
#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias) */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT

/*
 * Utilization clamp for a scheduling entity
 * @value:		clamp value "assigned" to a se
 * @bucket_id:		bucket index corresponding to the "assigned" value
 * @active:		the se is currently refcounted in a rq's bucket
 * @user_defined:	the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value
 * which is pre-computed and stored to avoid expensive integer divisions from
 * the fast path.
 *
 * The active bit is set whenever a task has got an "effective" value assigned,
 * which can be different from the clamp value "requested" from user-space.
 * This makes it possible to know that a task is refcounted in the rq's bucket
 * corresponding to the "effective" bucket_id.
 *
 * The user_defined bit is set whenever a task has got a task-specific clamp
 * value requested from userspace, i.e. the system defaults apply to this task
 * just as a restriction. This allows default clamps to be relaxed when a less
 * restrictive task-specific value has been requested, thus making it possible
 * to implement a "nice" semantic. For example, a task running with a 20%
 * default boost can still drop its own boosting to 0%.
 */
struct uclamp_se {
	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
	unsigned int active		: 1;
	unsigned int user_defined	: 1;
};
#endif /* CONFIG_UCLAMP_TASK */

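/*
 * Bucket mapping sketch (see uclamp_bucket_id() in kernel/sched/core.c for
 * the authoritative version): the pre-computed index is essentially
 *
 *   bucket_id = value / (SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS);
 *
 * clamped to UCLAMP_BUCKETS - 1, which is what turns the "expensive integer
 * division" into a setup-time cost rather than a fast-path one.
 */
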
union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_hint; /* Hint for performance. */
		u8			need_mb; /* Readers need smp_mb(). */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct kmap_ctrl {
#ifdef CONFIG_KMAP_LOCAL
	int				idx;
	pte_t				pteval[KM_MAX_IDX];
#endif
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	unsigned int			__state;

#ifdef CONFIG_PREEMPT_RT
	/* saved state for "spinlock sleepers" */
	unsigned int			saved_state;
#endif

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void				*stack;
	refcount_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	int				on_cpu;
	struct __call_single_node	wake_entry;
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	/*
	 * recent_used_cpu is initially set as the last CPU used by a task
	 * that wakes affine another task. Waker/wakee relationships can
	 * push tasks around a CPU where each wakeup moves to the next one.
	 * Tracking a recently used CPU allows a quick search for a recently
	 * used CPU that may be idle.
	 */
	int				recent_used_cpu;
	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	struct sched_entity		se;
	struct sched_rt_entity		rt;
	struct sched_dl_entity		dl;
	const struct sched_class	*sched_class;

#ifdef CONFIG_SCHED_CORE
	struct rb_node			core_node;
	unsigned long			core_cookie;
	unsigned int			core_occupation;
#endif

#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif

#ifdef CONFIG_UCLAMP_TASK
	/*
	 * Clamp values requested for a scheduling entity.
	 * Must be updated with task_rq_lock() held.
	 */
	struct uclamp_se		uclamp_req[UCLAMP_CNT];
	/*
	 * Effective clamp values used for a scheduling entity.
	 * Must be updated with task_rq_lock() held.
	 */
	struct uclamp_se		uclamp[UCLAMP_CNT];
#endif

	struct sched_statistics		stats;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	int				nr_cpus_allowed;
	const cpumask_t			*cpus_ptr;
	cpumask_t			*user_cpus_ptr;
	cpumask_t			cpus_mask;
	void				*migration_pending;
#ifdef CONFIG_SMP
	unsigned short			migration_disabled;
#endif
	unsigned short			migration_flags;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_TRACE_RCU
	int				trc_reader_nesting;
	int				trc_ipi_to_cpu;
	union rcu_special		trc_reader_special;
	struct list_head		trc_holdout_list;
	struct list_head		trc_blkd_node;
	int				trc_blkd_cpu;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;

	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/*
	 * This field must not be in the scheduler word above due to wakelist
	 * queueing no longer being serialized by p->on_cpu. However:
	 *
	 * p->XXX = X;			ttwu()
	 * schedule()			  if (p->on_rq && ..) // false
	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
	 *   deactivate_task()		      ttwu_queue_wakelist())
	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
	 *
	 * guarantees all stores of 'current' are visible before
	 * ->sched_remote_wakeup gets used, so it can be in this word.
	 */
	unsigned			sched_remote_wakeup:1;

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			in_user_fault:1;
#endif
#ifdef CONFIG_LRU_GEN
	/* whether the LRU algorithm may apply to this access */
	unsigned			in_lru_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
	/* task is frozen/stopped (used by the cgroup freezer) */
	unsigned			frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	unsigned			use_memdelay:1;
#endif
#ifdef CONFIG_PSI
	/* Stalled due to lack of memory */
	unsigned			in_memstall:1;
#endif
#ifdef CONFIG_PAGE_OWNER
	/* Used by page_owner=on to detect recursion in page tracking. */
	unsigned			in_page_owner:1;
#endif
#ifdef CONFIG_EVENTFD
	/* Recursion prevention for eventfd_signal() */
	unsigned			in_eventfd:1;
#endif
#ifdef CONFIG_IOMMU_SVA
	unsigned			pasid_activated:1;
#endif
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned			reported_split_lock:1;
#endif
#ifdef CONFIG_TASK_DELAY_ACCT
	/* delay due to memory thrashing */
	unsigned			in_thrashing:1;
#endif

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid			*thread_pid;
	struct hlist_node		pid_links[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	/* PF_KTHREAD | PF_IO_WORKER */
	void				*worker_private;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				start_boottime;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
	struct posix_cputimers		posix_cputimers;

#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
	struct posix_cputimers_work	posix_cputimers_work;
#endif

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

#ifdef CONFIG_KEYS
	/* Cached requested key. */
	struct key			*cached_requested_key;
#endif

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized by setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];
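	/*
	 * Illustrative use of the comm accessors (defined with the rest of
	 * the task_struct helpers): readers should use a properly sized
	 * buffer, e.g.:
	 *
	 *   char buf[TASK_COMM_LEN];
	 *
	 *   get_task_comm(buf, tsk);
	 *
	 * rather than reading tsk->comm directly, since task_lock()
	 * serialises it against concurrent set_task_comm() callers.
	 */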
10685eca1c10SIngo Molnar * 10695eca1c10SIngo Molnar * - normally initialized by setup_new_exec() 10705eca1c10SIngo Molnar * - access it with [gs]et_task_comm() 10715eca1c10SIngo Molnar * - lock it with task_lock() 10725eca1c10SIngo Molnar */ 10735eca1c10SIngo Molnar char comm[TASK_COMM_LEN]; 10745eca1c10SIngo Molnar 1075756daf26SNeilBrown struct nameidata *nameidata; 10765eca1c10SIngo Molnar 10773d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC 10781da177e4SLinus Torvalds struct sysv_sem sysvsem; 1079ab602f79SJack Miller struct sysv_shm sysvshm; 10803d5b6fccSAlexey Dobriyan #endif 1081e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK 108282a1fcb9SIngo Molnar unsigned long last_switch_count; 1083a2e51445SDmitry Vyukov unsigned long last_switch_time; 108482a1fcb9SIngo Molnar #endif 10855eca1c10SIngo Molnar /* Filesystem information: */ 10861da177e4SLinus Torvalds struct fs_struct *fs; 10875eca1c10SIngo Molnar 10885eca1c10SIngo Molnar /* Open file information: */ 10891da177e4SLinus Torvalds struct files_struct *files; 10905eca1c10SIngo Molnar 10910f212204SJens Axboe #ifdef CONFIG_IO_URING 10920f212204SJens Axboe struct io_uring_task *io_uring; 10930f212204SJens Axboe #endif 10940f212204SJens Axboe 10955eca1c10SIngo Molnar /* Namespaces: */ 1096ab516013SSerge E. Hallyn struct nsproxy *nsproxy; 10975eca1c10SIngo Molnar 10985eca1c10SIngo Molnar /* Signal handlers: */ 10991da177e4SLinus Torvalds struct signal_struct *signal; 1100913292c9SMadhuparna Bhowmik struct sighand_struct __rcu *sighand; 11015eca1c10SIngo Molnar sigset_t blocked; 11025eca1c10SIngo Molnar sigset_t real_blocked; 11035eca1c10SIngo Molnar /* Restored if set_restore_sigmask() was used: */ 11045eca1c10SIngo Molnar sigset_t saved_sigmask; 11051da177e4SLinus Torvalds struct sigpending pending; 11061da177e4SLinus Torvalds unsigned long sas_ss_sp; 11071da177e4SLinus Torvalds size_t sas_ss_size; 11085eca1c10SIngo Molnar unsigned int sas_ss_flags; 11092e01fabeSOleg Nesterov 111067d12145SAl Viro struct callback_head *task_works; 1111e73f8959SOleg Nesterov 11124b7d248bSRichard Guy Briggs #ifdef CONFIG_AUDIT 1113bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL 11145f3d544fSRichard Guy Briggs struct audit_context *audit_context; 11155f3d544fSRichard Guy Briggs #endif 1116e1760bd5SEric W. Biederman kuid_t loginuid; 11174746ec5bSEric Paris unsigned int sessionid; 1118bfef93a5SAl Viro #endif 1119932ecebbSWill Drewry struct seccomp seccomp; 11201446e1dfSGabriel Krisman Bertazi struct syscall_user_dispatch syscall_dispatch; 11211da177e4SLinus Torvalds 11225eca1c10SIngo Molnar /* Thread group tracking: */ 1123d1e7fd64SEric W. Biederman u64 parent_exec_id; 1124d1e7fd64SEric W.
Biederman u64 self_exec_id; 11255eca1c10SIngo Molnar 11265eca1c10SIngo Molnar /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ 11271da177e4SLinus Torvalds spinlock_t alloc_lock; 11281da177e4SLinus Torvalds 1129b29739f9SIngo Molnar /* Protection of the PI data structures: */ 11301d615482SThomas Gleixner raw_spinlock_t pi_lock; 1131b29739f9SIngo Molnar 113276751049SPeter Zijlstra struct wake_q_node wake_q; 113376751049SPeter Zijlstra 113423f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES 11355eca1c10SIngo Molnar /* PI waiters blocked on a rt_mutex held by this task: */ 1136a23ba907SDavidlohr Bueso struct rb_root_cached pi_waiters; 1137e96a7705SXunlei Pang /* Updated under owner's pi_lock and rq lock */ 1138e96a7705SXunlei Pang struct task_struct *pi_top_task; 11395eca1c10SIngo Molnar /* Deadlock detection and priority inheritance handling: */ 114023f78d4aSIngo Molnar struct rt_mutex_waiter *pi_blocked_on; 114123f78d4aSIngo Molnar #endif 114223f78d4aSIngo Molnar 1143408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES 11445eca1c10SIngo Molnar /* Mutex deadlock detection: */ 1145408894eeSIngo Molnar struct mutex_waiter *blocked_on; 1146408894eeSIngo Molnar #endif 11475eca1c10SIngo Molnar 1148312364f3SDaniel Vetter #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 1149312364f3SDaniel Vetter int non_block_count; 1150312364f3SDaniel Vetter #endif 1151312364f3SDaniel Vetter 1152de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS 11530584df9cSMarco Elver struct irqtrace_events irqtrace; 1154de8f5e4fSPeter Zijlstra unsigned int hardirq_threaded; 1155c86e9b98SPeter Zijlstra u64 hardirq_chain_key; 1156fa1452e8SHiroshi Shimamoto int softirqs_enabled; 1157de30a2b3SIngo Molnar int softirq_context; 115840db1739SSebastian Andrzej Siewior int irq_config; 1159de30a2b3SIngo Molnar #endif 1160728b478dSThomas Gleixner #ifdef CONFIG_PREEMPT_RT 1161728b478dSThomas Gleixner int softirq_disable_cnt; 1162728b478dSThomas Gleixner #endif 11635eca1c10SIngo Molnar 1164fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP 1165bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL 1166fbb9ce95SIngo Molnar u64 curr_chain_key; 1167fbb9ce95SIngo Molnar int lockdep_depth; 1168fbb9ce95SIngo Molnar unsigned int lockdep_recursion; 1169c7aceabaSRichard Kennedy struct held_lock held_locks[MAX_LOCK_DEPTH]; 1170fbb9ce95SIngo Molnar #endif 11715eca1c10SIngo Molnar 11725cf53f3cSElena Petrova #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) 1173c6d30853SAndrey Ryabinin unsigned int in_ubsan; 1174c6d30853SAndrey Ryabinin #endif 1175408894eeSIngo Molnar 11765eca1c10SIngo Molnar /* Journalling filesystem info: */ 11771da177e4SLinus Torvalds void *journal_info; 11781da177e4SLinus Torvalds 11795eca1c10SIngo Molnar /* Stacked block device info: */ 1180bddd87c7SAkinobu Mita struct bio_list *bio_list; 1181d89d8796SNeil Brown 11825eca1c10SIngo Molnar /* Stack plugging: */ 118373c10101SJens Axboe struct blk_plug *plug; 118473c10101SJens Axboe 11855eca1c10SIngo Molnar /* VM state: */ 11861da177e4SLinus Torvalds struct reclaim_state *reclaim_state; 11871da177e4SLinus Torvalds 11881da177e4SLinus Torvalds struct backing_dev_info *backing_dev_info; 11891da177e4SLinus Torvalds 11901da177e4SLinus Torvalds struct io_context *io_context; 11911da177e4SLinus Torvalds 11925e1f0f09SMel Gorman #ifdef CONFIG_COMPACTION 11935e1f0f09SMel Gorman struct capture_control *capture_control; 11945e1f0f09SMel Gorman #endif 11955eca1c10SIngo Molnar /* Ptrace state: */ 11961da177e4SLinus Torvalds unsigned long ptrace_message; 1197ae7795bcSEric W. 
Biederman kernel_siginfo_t *last_siginfo; 11985eca1c10SIngo Molnar 11997c3ab738SAndrew Morton struct task_io_accounting ioac; 1200eb414681SJohannes Weiner #ifdef CONFIG_PSI 1201eb414681SJohannes Weiner /* Pressure stall state */ 1202eb414681SJohannes Weiner unsigned int psi_flags; 1203eb414681SJohannes Weiner #endif 12045eca1c10SIngo Molnar #ifdef CONFIG_TASK_XACCT 12055eca1c10SIngo Molnar /* Accumulated RSS usage: */ 12065eca1c10SIngo Molnar u64 acct_rss_mem1; 12075eca1c10SIngo Molnar /* Accumulated virtual memory usage: */ 12085eca1c10SIngo Molnar u64 acct_vm_mem1; 12095eca1c10SIngo Molnar /* stime + utime since last update: */ 12105eca1c10SIngo Molnar u64 acct_timexpd; 12111da177e4SLinus Torvalds #endif 12121da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS 12135eca1c10SIngo Molnar /* Protected by ->alloc_lock: */ 12145eca1c10SIngo Molnar nodemask_t mems_allowed; 12153b03706fSIngo Molnar /* Sequence number to catch updates: */ 1216b7505861SAhmed S. Darwish seqcount_spinlock_t mems_allowed_seq; 1217825a46afSPaul Jackson int cpuset_mem_spread_rotor; 12186adef3ebSJack Steiner int cpuset_slab_spread_rotor; 12191da177e4SLinus Torvalds #endif 1220ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS 12215eca1c10SIngo Molnar /* Control Group info protected by css_set_lock: */ 12222c392b8cSArnd Bergmann struct css_set __rcu *cgroups; 12235eca1c10SIngo Molnar /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 1224817929ecSPaul Menage struct list_head cg_list; 1225ddbcc7e8SPaul Menage #endif 1226e6d42931SJohannes Weiner #ifdef CONFIG_X86_CPU_RESCTRL 12270734ded1SVikas Shivappa u32 closid; 1228d6aaba61SVikas Shivappa u32 rmid; 1229e02737d5SFenghua Yu #endif 123042b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX 12310771dfefSIngo Molnar struct robust_list_head __user *robust_list; 123234f192c6SIngo Molnar #ifdef CONFIG_COMPAT 123334f192c6SIngo Molnar struct compat_robust_list_head __user *compat_robust_list; 123434f192c6SIngo Molnar #endif 1235c87e2837SIngo Molnar struct list_head pi_state_list; 1236c87e2837SIngo Molnar struct futex_pi_state *pi_state_cache; 12373f186d97SThomas Gleixner struct mutex futex_exit_mutex; 12383d4775dfSThomas Gleixner unsigned int futex_state; 123942b2dd0aSAlexey Dobriyan #endif 1240cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS 1241bd275681SPeter Zijlstra struct perf_event_context *perf_event_ctxp; 1242cdd6c482SIngo Molnar struct mutex perf_event_mutex; 1243cdd6c482SIngo Molnar struct list_head perf_event_list; 1244a63eaf34SPaul Mackerras #endif 12458f47b187SThomas Gleixner #ifdef CONFIG_DEBUG_PREEMPT 12468f47b187SThomas Gleixner unsigned long preempt_disable_ip; 12478f47b187SThomas Gleixner #endif 1248c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA 12495eca1c10SIngo Molnar /* Protected by alloc_lock: */ 12505eca1c10SIngo Molnar struct mempolicy *mempolicy; 125145816682SVlastimil Babka short il_prev; 1252207205a2SEric Dumazet short pref_node_fork; 1253c7aceabaSRichard Kennedy #endif 1254cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING 1255cbee9f88SPeter Zijlstra int numa_scan_seq; 1256cbee9f88SPeter Zijlstra unsigned int numa_scan_period; 1257598f0ec0SMel Gorman unsigned int numa_scan_period_max; 1258de1c9ce6SRik van Riel int numa_preferred_nid; 12596b9a7460SMel Gorman unsigned long numa_migrate_retry; 12605eca1c10SIngo Molnar /* Migration stamp: */ 12615eca1c10SIngo Molnar u64 node_stamp; 12627e2703e6SRik van Riel u64 last_task_numa_placement; 12637e2703e6SRik van Riel u64 last_sum_exec_runtime; 1264cbee9f88SPeter Zijlstra struct callback_head numa_work; 1265f809ca9aSMel Gorman 
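/*
 * Illustrative sketch (not upstream code): the cpuset fields above are
 * usually read as a consistent snapshot via mems_allowed_seq, in the
 * style of read_mems_allowed_begin()/read_mems_allowed_retry():
 *
 *	nodemask_t nodes;
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_seqcount_begin(&current->mems_allowed_seq);
 *		nodes = current->mems_allowed;
 *	} while (read_seqcount_retry(&current->mems_allowed_seq, seq));
 */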
1266cb361d8cSJann Horn /* 1267cb361d8cSJann Horn * This pointer is only modified for current in syscall and 1268cb361d8cSJann Horn * pagefault context (and for tasks being destroyed), so it can be read 1269cb361d8cSJann Horn * from any of the following contexts: 1270cb361d8cSJann Horn * - RCU read-side critical section 1271cb361d8cSJann Horn * - current->numa_group from everywhere 1272cb361d8cSJann Horn * - task's runqueue locked, task not running 1273cb361d8cSJann Horn */ 1274cb361d8cSJann Horn struct numa_group __rcu *numa_group; 12758c8a743cSPeter Zijlstra 1276745d6147SMel Gorman /* 127744dba3d5SIulia Manda * numa_faults is an array split into four regions: 127844dba3d5SIulia Manda * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer 127944dba3d5SIulia Manda * in this precise order. 128044dba3d5SIulia Manda * 128144dba3d5SIulia Manda * faults_memory: Exponential decaying average of faults on a per-node 128244dba3d5SIulia Manda * basis. Scheduling placement decisions are made based on these 128344dba3d5SIulia Manda * counts. The values remain static for the duration of a PTE scan. 128444dba3d5SIulia Manda * faults_cpu: Track the nodes the process was running on when a NUMA 128544dba3d5SIulia Manda * hinting fault was incurred. 128644dba3d5SIulia Manda * faults_memory_buffer and faults_cpu_buffer: Record faults per node 128744dba3d5SIulia Manda * during the current scan window. When the scan completes, the counts 128844dba3d5SIulia Manda * in faults_memory and faults_cpu decay and these values are copied. 1289745d6147SMel Gorman */ 129044dba3d5SIulia Manda unsigned long *numa_faults; 129183e1d2cdSMel Gorman unsigned long total_numa_faults; 1292745d6147SMel Gorman 1293745d6147SMel Gorman /* 129404bb2f94SRik van Riel * numa_faults_locality tracks if faults recorded during the last 1295074c2381SMel Gorman * scan window were remote/local or failed to migrate. The task scan 1296074c2381SMel Gorman * period is adapted based on the locality of the faults with different 1297074c2381SMel Gorman * weights depending on whether they were shared or private faults 129804bb2f94SRik van Riel */ 1299074c2381SMel Gorman unsigned long numa_faults_locality[3]; 130004bb2f94SRik van Riel 1301b32e86b4SIngo Molnar unsigned long numa_pages_migrated; 1302cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */ 1303cbee9f88SPeter Zijlstra 1304d7822b1eSMathieu Desnoyers #ifdef CONFIG_RSEQ 1305d7822b1eSMathieu Desnoyers struct rseq __user *rseq; 1306ee3e3ac0SMathieu Desnoyers u32 rseq_len; 1307d7822b1eSMathieu Desnoyers u32 rseq_sig; 1308d7822b1eSMathieu Desnoyers /* 1309d7822b1eSMathieu Desnoyers * RmW on rseq_event_mask must be performed atomically 1310d7822b1eSMathieu Desnoyers * with respect to preemption. 1311d7822b1eSMathieu Desnoyers */ 1312d7822b1eSMathieu Desnoyers unsigned long rseq_event_mask; 1313d7822b1eSMathieu Desnoyers #endif 1314d7822b1eSMathieu Desnoyers 1315af7f588dSMathieu Desnoyers #ifdef CONFIG_SCHED_MM_CID 1316af7f588dSMathieu Desnoyers int mm_cid; /* Current cid in mm */ 1317af7f588dSMathieu Desnoyers int mm_cid_active; /* Whether cid bitmap is active */ 1318af7f588dSMathieu Desnoyers #endif 1319af7f588dSMathieu Desnoyers 132072b252aeSMel Gorman struct tlbflush_unmap_batch tlb_ubc; 132172b252aeSMel Gorman 13223fbd7ee2SEric W. Biederman union { 13233fbd7ee2SEric W. Biederman refcount_t rcu_users; 1324e56d0903SIngo Molnar struct rcu_head rcu; 13253fbd7ee2SEric W. 
Biederman }; 1326b92ce558SJens Axboe 13275eca1c10SIngo Molnar /* Cache last used pipe for splice(): */ 1328b92ce558SJens Axboe struct pipe_inode_info *splice_pipe; 13295640f768SEric Dumazet 13305640f768SEric Dumazet struct page_frag task_frag; 13315640f768SEric Dumazet 1332ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT 1333ca74e92bSShailabh Nagar struct task_delay_info *delays; 1334ca74e92bSShailabh Nagar #endif 133547913d4eSIngo Molnar 1336f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION 1337f4f154fdSAkinobu Mita int make_it_fail; 13389049f2f6SAkinobu Mita unsigned int fail_nth; 1339f4f154fdSAkinobu Mita #endif 13409d823e8fSWu Fengguang /* 13415eca1c10SIngo Molnar * When (nr_dirtied >= nr_dirtied_pause), it's time to call 13425eca1c10SIngo Molnar * balance_dirty_pages() for a dirty throttling pause: 13439d823e8fSWu Fengguang */ 13449d823e8fSWu Fengguang int nr_dirtied; 13459d823e8fSWu Fengguang int nr_dirtied_pause; 13465eca1c10SIngo Molnar /* Start of a write-and-pause period: */ 13475eca1c10SIngo Molnar unsigned long dirty_paused_when; 13489d823e8fSWu Fengguang 13499745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP 13509745512cSArjan van de Ven int latency_record_count; 13519745512cSArjan van de Ven struct latency_record latency_record[LT_SAVECOUNT]; 13529745512cSArjan van de Ven #endif 13536976675dSArjan van de Ven /* 13545eca1c10SIngo Molnar * Time slack values; these are used to round up poll() and 13556976675dSArjan van de Ven * select() etc timeout values. These are in nanoseconds. 13566976675dSArjan van de Ven */ 1357da8b44d5SJohn Stultz u64 timer_slack_ns; 1358da8b44d5SJohn Stultz u64 default_timer_slack_ns; 1359f8d570a4SDavid Miller 1360d73b4936SAndrey Konovalov #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 13610b24beccSAndrey Ryabinin unsigned int kasan_depth; 13620b24beccSAndrey Ryabinin #endif 136392c209acSMarco Elver 1364dfd402a4SMarco Elver #ifdef CONFIG_KCSAN 1365dfd402a4SMarco Elver struct kcsan_ctx kcsan_ctx; 136692c209acSMarco Elver #ifdef CONFIG_TRACE_IRQFLAGS 136792c209acSMarco Elver struct irqtrace_events kcsan_save_irqtrace; 136892c209acSMarco Elver #endif 136969562e49SMarco Elver #ifdef CONFIG_KCSAN_WEAK_MEMORY 137069562e49SMarco Elver int kcsan_stack_depth; 137169562e49SMarco Elver #endif 1372dfd402a4SMarco Elver #endif 13735eca1c10SIngo Molnar 1374f80be457SAlexander Potapenko #ifdef CONFIG_KMSAN 1375f80be457SAlexander Potapenko struct kmsan_ctx kmsan_ctx; 1376f80be457SAlexander Potapenko #endif 1377f80be457SAlexander Potapenko 1378393824f6SPatricia Alfonso #if IS_ENABLED(CONFIG_KUNIT) 1379393824f6SPatricia Alfonso struct kunit *kunit_test; 1380393824f6SPatricia Alfonso #endif 1381393824f6SPatricia Alfonso 1382fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER 13835eca1c10SIngo Molnar /* Index of current stored address in ret_stack: */ 1384f201ae23SFrederic Weisbecker int curr_ret_stack; 138539eb456dSSteven Rostedt (VMware) int curr_ret_depth; 13865eca1c10SIngo Molnar 13875eca1c10SIngo Molnar /* Stack of return addresses for return function tracing: */ 1388f201ae23SFrederic Weisbecker struct ftrace_ret_stack *ret_stack; 13895eca1c10SIngo Molnar 13905eca1c10SIngo Molnar /* Timestamp for last schedule: */ 13918aef2d28SSteven Rostedt unsigned long long ftrace_timestamp; 13925eca1c10SIngo Molnar 1393f201ae23SFrederic Weisbecker /* 1394f201ae23SFrederic Weisbecker * Number of functions that haven't been traced 13955eca1c10SIngo Molnar * because of depth overrun: 1396f201ae23SFrederic Weisbecker */ 1397f201ae23SFrederic 
Weisbecker atomic_t trace_overrun; 13985eca1c10SIngo Molnar 13995eca1c10SIngo Molnar /* Pause tracing: */ 1400380c4b14SFrederic Weisbecker atomic_t tracing_graph_pause; 1401f201ae23SFrederic Weisbecker #endif 14025eca1c10SIngo Molnar 1403ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING 14045eca1c10SIngo Molnar /* Bitmask and counter of trace recursion: */ 1405261842b7SSteven Rostedt unsigned long trace_recursion; 1406261842b7SSteven Rostedt #endif /* CONFIG_TRACING */ 14075eca1c10SIngo Molnar 14085c9a8750SDmitry Vyukov #ifdef CONFIG_KCOV 1409eec028c9SAndrey Konovalov /* See kernel/kcov.c for more details. */ 1410eec028c9SAndrey Konovalov 14115eca1c10SIngo Molnar /* Coverage collection mode enabled for this task (0 if disabled): */ 14120ed557aaSMark Rutland unsigned int kcov_mode; 14135eca1c10SIngo Molnar 14145eca1c10SIngo Molnar /* Size of the kcov_area: */ 14155eca1c10SIngo Molnar unsigned int kcov_size; 14165eca1c10SIngo Molnar 14175eca1c10SIngo Molnar /* Buffer for coverage collection: */ 14185c9a8750SDmitry Vyukov void *kcov_area; 14195eca1c10SIngo Molnar 14205eca1c10SIngo Molnar /* KCOV descriptor wired with this task or NULL: */ 14215c9a8750SDmitry Vyukov struct kcov *kcov; 1422eec028c9SAndrey Konovalov 1423eec028c9SAndrey Konovalov /* KCOV common handle for remote coverage collection: */ 1424eec028c9SAndrey Konovalov u64 kcov_handle; 1425eec028c9SAndrey Konovalov 1426eec028c9SAndrey Konovalov /* KCOV sequence number: */ 1427eec028c9SAndrey Konovalov int kcov_sequence; 14285ff3b30aSAndrey Konovalov 14295ff3b30aSAndrey Konovalov /* Collect coverage from softirq context: */ 14305ff3b30aSAndrey Konovalov unsigned int kcov_softirq; 14315c9a8750SDmitry Vyukov #endif 14325eca1c10SIngo Molnar 14336f185c29SVladimir Davydov #ifdef CONFIG_MEMCG 1434626ebc41STejun Heo struct mem_cgroup *memcg_in_oom; 1435626ebc41STejun Heo gfp_t memcg_oom_gfp_mask; 1436626ebc41STejun Heo int memcg_oom_order; 1437b23afb93STejun Heo 14385eca1c10SIngo Molnar /* Number of pages to reclaim on returning to userland: */ 1439b23afb93STejun Heo unsigned int memcg_nr_pages_over_high; 1440d46eb14bSShakeel Butt 1441d46eb14bSShakeel Butt /* Used by memcontrol for targeted memcg charge: */ 1442d46eb14bSShakeel Butt struct mem_cgroup *active_memcg; 1443569b846dSKAMEZAWA Hiroyuki #endif 14445eca1c10SIngo Molnar 1445d09d8df3SJosef Bacik #ifdef CONFIG_BLK_CGROUP 1446f05837edSChristoph Hellwig struct gendisk *throttle_disk; 1447d09d8df3SJosef Bacik #endif 1448d09d8df3SJosef Bacik 14490326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES 14500326f5a9SSrikar Dronamraju struct uprobe_task *utask; 14510326f5a9SSrikar Dronamraju #endif 1452cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) 1453cafe5635SKent Overstreet unsigned int sequential_io; 1454cafe5635SKent Overstreet unsigned int sequential_io_avg; 1455cafe5635SKent Overstreet #endif 14565fbda3ecSThomas Gleixner struct kmap_ctrl kmap_ctrl; 14578eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 14588eb23b9fSPeter Zijlstra unsigned long task_state_change; 14595f220be2SThomas Gleixner # ifdef CONFIG_PREEMPT_RT 14605f220be2SThomas Gleixner unsigned long saved_state_change; 14615f220be2SThomas Gleixner # endif 14628eb23b9fSPeter Zijlstra #endif 14638bcbde54SDavid Hildenbrand int pagefault_disabled; 146403049269SMichal Hocko #ifdef CONFIG_MMU 146529c696e1SVladimir Davydov struct task_struct *oom_reaper_list; 1466e4a38402SNico Pache struct timer_list oom_reaper_timer; 146703049269SMichal Hocko #endif 1468ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 
1469ba14a194SAndy Lutomirski struct vm_struct *stack_vm_area; 1470ba14a194SAndy Lutomirski #endif 147168f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 14725eca1c10SIngo Molnar /* A live task holds one reference: */ 1473f0b89d39SElena Reshetova refcount_t stack_refcount; 147468f24b08SAndy Lutomirski #endif 1475d83a7cb3SJosh Poimboeuf #ifdef CONFIG_LIVEPATCH 1476d83a7cb3SJosh Poimboeuf int patch_state; 1477d83a7cb3SJosh Poimboeuf #endif 1478e4e55b47STetsuo Handa #ifdef CONFIG_SECURITY 1479e4e55b47STetsuo Handa /* Used by LSM modules for access restriction: */ 1480e4e55b47STetsuo Handa void *security; 1481e4e55b47STetsuo Handa #endif 1482a10787e6SSong Liu #ifdef CONFIG_BPF_SYSCALL 1483a10787e6SSong Liu /* Used by BPF task local storage */ 1484a10787e6SSong Liu struct bpf_local_storage __rcu *bpf_storage; 1485c7603cfaSAndrii Nakryiko /* Used for BPF run context */ 1486c7603cfaSAndrii Nakryiko struct bpf_run_ctx *bpf_ctx; 1487a10787e6SSong Liu #endif 148829e48ce8SKees Cook 1489afaef01cSAlexander Popov #ifdef CONFIG_GCC_PLUGIN_STACKLEAK 1490afaef01cSAlexander Popov unsigned long lowest_stack; 1491c8d12627SAlexander Popov unsigned long prev_lowest_stack; 1492afaef01cSAlexander Popov #endif 1493afaef01cSAlexander Popov 14945567d11cSPeter Zijlstra #ifdef CONFIG_X86_MCE 1495c0ab7ffcSTony Luck void __user *mce_vaddr; 1496c0ab7ffcSTony Luck __u64 mce_kflags; 14975567d11cSPeter Zijlstra u64 mce_addr; 149817fae129STony Luck __u64 mce_ripv : 1, 149917fae129STony Luck mce_whole_page : 1, 150017fae129STony Luck __mce_reserved : 62; 15015567d11cSPeter Zijlstra struct callback_head mce_kill_me; 150281065b35STony Luck int mce_count; 15035567d11cSPeter Zijlstra #endif 15045567d11cSPeter Zijlstra 1505d741bf41SPeter Zijlstra #ifdef CONFIG_KRETPROBES 1506d741bf41SPeter Zijlstra struct llist_head kretprobe_instances; 1507d741bf41SPeter Zijlstra #endif 150854ecbe6fSMasami Hiramatsu #ifdef CONFIG_RETHOOK 150954ecbe6fSMasami Hiramatsu struct llist_head rethooks; 151054ecbe6fSMasami Hiramatsu #endif 1511d741bf41SPeter Zijlstra 151258e106e7SBalbir Singh #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH 151358e106e7SBalbir Singh /* 151458e106e7SBalbir Singh * If L1D flush is supported on mm context switch 151558e106e7SBalbir Singh * then we use this callback head to queue kill work 151658e106e7SBalbir Singh * to kill tasks that are not running on SMT disabled 151758e106e7SBalbir Singh * cores 151858e106e7SBalbir Singh */ 151958e106e7SBalbir Singh struct callback_head l1d_flush_kill; 152058e106e7SBalbir Singh #endif 152158e106e7SBalbir Singh 1522102227b9SDaniel Bristot de Oliveira #ifdef CONFIG_RV 1523102227b9SDaniel Bristot de Oliveira /* 1524102227b9SDaniel Bristot de Oliveira * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS. 1525102227b9SDaniel Bristot de Oliveira * If we find justification for more monitors, we can think 1526102227b9SDaniel Bristot de Oliveira * about adding more or developing a dynamic method. So far, 1527102227b9SDaniel Bristot de Oliveira * none of these are justified. 
1528102227b9SDaniel Bristot de Oliveira */ 1529102227b9SDaniel Bristot de Oliveira union rv_task_monitor rv[RV_PER_TASK_MONITORS]; 1530102227b9SDaniel Bristot de Oliveira #endif 1531102227b9SDaniel Bristot de Oliveira 1532*fd593511SBeau Belgrave #ifdef CONFIG_USER_EVENTS 1533*fd593511SBeau Belgrave struct user_event_mm *user_event_mm; 1534*fd593511SBeau Belgrave #endif 1535*fd593511SBeau Belgrave 153629e48ce8SKees Cook /* 153729e48ce8SKees Cook * New fields for task_struct should be added above here, so that 153829e48ce8SKees Cook * they are included in the randomized portion of task_struct. 153929e48ce8SKees Cook */ 154029e48ce8SKees Cook randomized_struct_fields_end 154129e48ce8SKees Cook 15425eca1c10SIngo Molnar /* CPU-specific state of this task: */ 15430c8c0f03SDave Hansen struct thread_struct thread; 15445eca1c10SIngo Molnar 15450c8c0f03SDave Hansen /* 15460c8c0f03SDave Hansen * WARNING: on x86, 'thread_struct' contains a variable-sized 15470c8c0f03SDave Hansen * structure. It *MUST* be at the end of 'task_struct'. 15480c8c0f03SDave Hansen * 15490c8c0f03SDave Hansen * Do not put anything below here! 15500c8c0f03SDave Hansen */ 15511da177e4SLinus Torvalds }; 15521da177e4SLinus Torvalds 1553e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task) 155422c935f4SEric W. Biederman { 15552c470475SEric W. Biederman return task->thread_pid; 155622c935f4SEric W. Biederman } 155722c935f4SEric W. Biederman 15587af57294SPavel Emelyanov /* 15597af57294SPavel Emelyanov * the helpers to get the task's different pids as they are seen 15607af57294SPavel Emelyanov * from various namespaces 15617af57294SPavel Emelyanov * 15627af57294SPavel Emelyanov * task_xid_nr() : global id, i.e. the id seen from the init namespace; 156344c4e1b2SEric W. Biederman * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of 156444c4e1b2SEric W. Biederman * current. 15657af57294SPavel Emelyanov * task_xid_nr_ns() : id seen from the ns specified; 15667af57294SPavel Emelyanov * 15677af57294SPavel Emelyanov * see also pid_nr() etc in include/linux/pid.h 15687af57294SPavel Emelyanov */ 15695eca1c10SIngo Molnar pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); 15707af57294SPavel Emelyanov 1571e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk) 15727af57294SPavel Emelyanov { 15737af57294SPavel Emelyanov return tsk->pid; 15747af57294SPavel Emelyanov } 15757af57294SPavel Emelyanov 15765eca1c10SIngo Molnar static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 157752ee2dfdSOleg Nesterov { 157852ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); 157952ee2dfdSOleg Nesterov } 15807af57294SPavel Emelyanov 15817af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk) 15827af57294SPavel Emelyanov { 158352ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); 15847af57294SPavel Emelyanov } 15857af57294SPavel Emelyanov 15867af57294SPavel Emelyanov 1587e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk) 15887af57294SPavel Emelyanov { 15897af57294SPavel Emelyanov return tsk->tgid; 15907af57294SPavel Emelyanov } 15917af57294SPavel Emelyanov 15925eca1c10SIngo Molnar /** 15935eca1c10SIngo Molnar * pid_alive - check that a task structure is not stale 15945eca1c10SIngo Molnar * @p: Task structure to be checked. 
15955eca1c10SIngo Molnar * 15965eca1c10SIngo Molnar * Test if a process is not yet dead (at most zombie state) 15975eca1c10SIngo Molnar * If pid_alive fails, then pointers within the task structure 15985eca1c10SIngo Molnar * can be stale and must not be dereferenced. 15995eca1c10SIngo Molnar * 16005eca1c10SIngo Molnar * Return: 1 if the process is alive. 0 otherwise. 16015eca1c10SIngo Molnar */ 16025eca1c10SIngo Molnar static inline int pid_alive(const struct task_struct *p) 16035eca1c10SIngo Molnar { 16042c470475SEric W. Biederman return p->thread_pid != NULL; 16055eca1c10SIngo Molnar } 16067af57294SPavel Emelyanov 16075eca1c10SIngo Molnar static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 16087af57294SPavel Emelyanov { 160952ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); 16107af57294SPavel Emelyanov } 16117af57294SPavel Emelyanov 16127af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 16137af57294SPavel Emelyanov { 161452ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); 16157af57294SPavel Emelyanov } 16167af57294SPavel Emelyanov 16177af57294SPavel Emelyanov 16185eca1c10SIngo Molnar static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 16197af57294SPavel Emelyanov { 162052ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); 16217af57294SPavel Emelyanov } 16227af57294SPavel Emelyanov 16237af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk) 16247af57294SPavel Emelyanov { 162552ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); 16267af57294SPavel Emelyanov } 16277af57294SPavel Emelyanov 1628dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1629dd1c1f2fSOleg Nesterov { 16306883f81aSEric W. Biederman return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); 1631dd1c1f2fSOleg Nesterov } 1632dd1c1f2fSOleg Nesterov 1633dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_vnr(struct task_struct *tsk) 1634dd1c1f2fSOleg Nesterov { 16356883f81aSEric W. 
Biederman return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); 1636dd1c1f2fSOleg Nesterov } 1637dd1c1f2fSOleg Nesterov 1638dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) 1639dd1c1f2fSOleg Nesterov { 1640dd1c1f2fSOleg Nesterov pid_t pid = 0; 1641dd1c1f2fSOleg Nesterov 1642dd1c1f2fSOleg Nesterov rcu_read_lock(); 1643dd1c1f2fSOleg Nesterov if (pid_alive(tsk)) 1644dd1c1f2fSOleg Nesterov pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); 1645dd1c1f2fSOleg Nesterov rcu_read_unlock(); 1646dd1c1f2fSOleg Nesterov 1647dd1c1f2fSOleg Nesterov return pid; 1648dd1c1f2fSOleg Nesterov } 1649dd1c1f2fSOleg Nesterov 1650dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr(const struct task_struct *tsk) 1651dd1c1f2fSOleg Nesterov { 1652dd1c1f2fSOleg Nesterov return task_ppid_nr_ns(tsk, &init_pid_ns); 1653dd1c1f2fSOleg Nesterov } 1654dd1c1f2fSOleg Nesterov 16555eca1c10SIngo Molnar /* Obsolete, do not use: */ 16561b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk) 16571b0f7ffdSOleg Nesterov { 16581b0f7ffdSOleg Nesterov return task_pgrp_nr_ns(tsk, &init_pid_ns); 16591b0f7ffdSOleg Nesterov } 16607af57294SPavel Emelyanov 166106eb6184SPeter Zijlstra #define TASK_REPORT_IDLE (TASK_REPORT + 1) 166206eb6184SPeter Zijlstra #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) 166306eb6184SPeter Zijlstra 1664fa2c3254SValentin Schneider static inline unsigned int __task_state_index(unsigned int tsk_state, 1665fa2c3254SValentin Schneider unsigned int tsk_exit_state) 166620435d84SXie XiuQi { 1667fa2c3254SValentin Schneider unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT; 166820435d84SXie XiuQi 166906eb6184SPeter Zijlstra BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); 167006eb6184SPeter Zijlstra 167106eb6184SPeter Zijlstra if (tsk_state == TASK_IDLE) 167206eb6184SPeter Zijlstra state = TASK_REPORT_IDLE; 167306eb6184SPeter Zijlstra 167425795ef6SValentin Schneider /* 167525795ef6SValentin Schneider * We're lying here, but rather than expose a completely new task state 167625795ef6SValentin Schneider * to userspace, we can make this appear as if the task has gone through 167725795ef6SValentin Schneider * a regular rt_mutex_lock() call. 
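 *
 * Concretely: the task is reported as TASK_UNINTERRUPTIBLE (0x2), fls()
 * below returns 2, and task_index_to_char() picks 'D' from its
 * "RSDTtXZPI" table, exactly like an ordinary uninterruptible sleep.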
167825795ef6SValentin Schneider */ 167925795ef6SValentin Schneider if (tsk_state == TASK_RTLOCK_WAIT) 168025795ef6SValentin Schneider state = TASK_UNINTERRUPTIBLE; 168125795ef6SValentin Schneider 16821593baabSPeter Zijlstra return fls(state); 16831593baabSPeter Zijlstra } 168420435d84SXie XiuQi 1685fa2c3254SValentin Schneider static inline unsigned int task_state_index(struct task_struct *tsk) 1686fa2c3254SValentin Schneider { 1687fa2c3254SValentin Schneider return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); 1688fa2c3254SValentin Schneider } 1689fa2c3254SValentin Schneider 16901d48b080SPeter Zijlstra static inline char task_index_to_char(unsigned int state) 16911593baabSPeter Zijlstra { 16928ef9925bSPeter Zijlstra static const char state_char[] = "RSDTtXZPI"; 16931593baabSPeter Zijlstra 169406eb6184SPeter Zijlstra BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); 16951593baabSPeter Zijlstra 16961593baabSPeter Zijlstra return state_char[state]; 16971593baabSPeter Zijlstra } 16981593baabSPeter Zijlstra 16991593baabSPeter Zijlstra static inline char task_state_to_char(struct task_struct *tsk) 17001593baabSPeter Zijlstra { 17011d48b080SPeter Zijlstra return task_index_to_char(task_state_index(tsk)); 170220435d84SXie XiuQi } 170320435d84SXie XiuQi 17041da177e4SLinus Torvalds /** 1705570f5241SSergey Senozhatsky * is_global_init - check if a task structure is init. Since init 1706570f5241SSergey Senozhatsky * is free to have sub-threads we need to check tgid. 17073260259fSHenne * @tsk: Task structure to be checked. 17083260259fSHenne * 17093260259fSHenne * Check if a task structure is the first user space task the kernel created. 1710e69f6186SYacine Belkadi * 1711e69f6186SYacine Belkadi * Return: 1 if the task structure is init. 0 otherwise. 1712f400e198SSukadev Bhattiprolu */ 1713e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk) 1714b461cc03SPavel Emelyanov { 1715570f5241SSergey Senozhatsky return task_tgid_nr(tsk) == 1; 1716b461cc03SPavel Emelyanov } 1717b460cbc5SSerge E. Hallyn 17189ec52099SCedric Le Goater extern struct pid *cad_pid; 17199ec52099SCedric Le Goater 17201da177e4SLinus Torvalds /* 17211da177e4SLinus Torvalds * Per process flags 17221da177e4SLinus Torvalds */ 172301ccf592SSebastian Andrzej Siewior #define PF_VCPU 0x00000001 /* I'm a virtual CPU */ 1724c1de45caSPeter Zijlstra #define PF_IDLE 0x00000002 /* I am an IDLE thread */ 17255eca1c10SIngo Molnar #define PF_EXITING 0x00000004 /* Getting shut down */ 172692307383SEric W. 
Biederman #define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */ 172701ccf592SSebastian Andrzej Siewior #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ 172821aa9af0STejun Heo #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 17295eca1c10SIngo Molnar #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ 17305eca1c10SIngo Molnar #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ 17315eca1c10SIngo Molnar #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ 17325eca1c10SIngo Molnar #define PF_DUMPCORE 0x00000200 /* Dumped core */ 17335eca1c10SIngo Molnar #define PF_SIGNALED 0x00000400 /* Killed by a signal */ 17341da177e4SLinus Torvalds #define PF_MEMALLOC 0x00000800 /* Allocating memory */ 17355eca1c10SIngo Molnar #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ 17365eca1c10SIngo Molnar #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ 1737fb04563dSPeter Zijlstra #define PF__HOLE__00004000 0x00004000 17385eca1c10SIngo Molnar #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ 1739fb04563dSPeter Zijlstra #define PF__HOLE__00010000 0x00010000 17407dea19f9SMichal Hocko #define PF_KSWAPD 0x00020000 /* I am kswapd */ 17417dea19f9SMichal Hocko #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ 17427dea19f9SMichal Hocko #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ 1743a37b0715SNeilBrown #define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, 1744a37b0715SNeilBrown * I am cleaning dirty pages from some other bdi. */ 1745246bb0b1SOleg Nesterov #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 17465eca1c10SIngo Molnar #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1747fb04563dSPeter Zijlstra #define PF__HOLE__00800000 0x00800000 1748fb04563dSPeter Zijlstra #define PF__HOLE__01000000 0x01000000 1749fb04563dSPeter Zijlstra #define PF__HOLE__02000000 0x02000000 17503bd37062SSebastian Andrzej Siewior #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ 17514db96cf0SAndi Kleen #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 17521a08ae36SPavel Tatashin #define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */ 1753fb04563dSPeter Zijlstra #define PF__HOLE__20000000 0x20000000 1754fb04563dSPeter Zijlstra #define PF__HOLE__40000000 0x40000000 17555eca1c10SIngo Molnar #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ 17561da177e4SLinus Torvalds 17571da177e4SLinus Torvalds /* 17581da177e4SLinus Torvalds * Only the _current_ task can read/write to tsk->flags, but other 17591da177e4SLinus Torvalds * tasks can access tsk->flags in readonly mode for example 17601da177e4SLinus Torvalds * with tsk_used_math (like during threaded core dumping). 
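 * For instance, a read-only test such as the following (illustrative)
 * is fine:
 *
 *	if (p->flags & PF_KTHREAD)
 *		pr_debug("skipping kernel thread\n");
 *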
17611da177e4SLinus Torvalds * There is however an exception to this rule during ptrace 17621da177e4SLinus Torvalds * or during fork: the ptracer task is allowed to write to the 17631da177e4SLinus Torvalds * child->flags of its traced child (same goes for fork, the parent 17641da177e4SLinus Torvalds * can write to the child->flags), because we're guaranteed the 17651da177e4SLinus Torvalds * child is not running and in turn not changing child->flags 17661da177e4SLinus Torvalds * at the same time the parent does it. 17671da177e4SLinus Torvalds */ 17681da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 17691da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 17701da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current) 17711da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current) 17725eca1c10SIngo Molnar 17731da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \ 17741da177e4SLinus Torvalds do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 17755eca1c10SIngo Molnar 17765eca1c10SIngo Molnar #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) 17775eca1c10SIngo Molnar 17781da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \ 17791da177e4SLinus Torvalds do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 17805eca1c10SIngo Molnar 17811da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 17821da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 17831da177e4SLinus Torvalds #define used_math() tsk_used_math(current) 17841da177e4SLinus Torvalds 178583d40a61SPeter Zijlstra static __always_inline bool is_percpu_thread(void) 178662ec05ddSThomas Gleixner { 178762ec05ddSThomas Gleixner #ifdef CONFIG_SMP 178862ec05ddSThomas Gleixner return (current->flags & PF_NO_SETAFFINITY) && 178962ec05ddSThomas Gleixner (current->nr_cpus_allowed == 1); 179062ec05ddSThomas Gleixner #else 179162ec05ddSThomas Gleixner return true; 179262ec05ddSThomas Gleixner #endif 179362ec05ddSThomas Gleixner } 179462ec05ddSThomas Gleixner 17951d4457f9SKees Cook /* Per-process atomic flags. */ 1796a2b86f77SZefan Li #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
*/ 17972ad654bcSZefan Li #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 17982ad654bcSZefan Li #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1799356e4bffSThomas Gleixner #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ 1800356e4bffSThomas Gleixner #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled */ 18019137bb27SThomas Gleixner #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ 18029137bb27SThomas Gleixner #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ 180371368af9SWaiman Long #define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ 18041d4457f9SKees Cook 1805e0e5070bSZefan Li #define TASK_PFA_TEST(name, func) \ 1806e0e5070bSZefan Li static inline bool task_##func(struct task_struct *p) \ 1807e0e5070bSZefan Li { return test_bit(PFA_##name, &p->atomic_flags); } 18085eca1c10SIngo Molnar 1809e0e5070bSZefan Li #define TASK_PFA_SET(name, func) \ 1810e0e5070bSZefan Li static inline void task_set_##func(struct task_struct *p) \ 1811e0e5070bSZefan Li { set_bit(PFA_##name, &p->atomic_flags); } 18125eca1c10SIngo Molnar 1813e0e5070bSZefan Li #define TASK_PFA_CLEAR(name, func) \ 1814e0e5070bSZefan Li static inline void task_clear_##func(struct task_struct *p) \ 1815e0e5070bSZefan Li { clear_bit(PFA_##name, &p->atomic_flags); } 18161d4457f9SKees Cook 1817e0e5070bSZefan Li TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) 1818e0e5070bSZefan Li TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) 18191d4457f9SKees Cook 18202ad654bcSZefan Li TASK_PFA_TEST(SPREAD_PAGE, spread_page) 18212ad654bcSZefan Li TASK_PFA_SET(SPREAD_PAGE, spread_page) 18222ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) 18232ad654bcSZefan Li 18242ad654bcSZefan Li TASK_PFA_TEST(SPREAD_SLAB, spread_slab) 18252ad654bcSZefan Li TASK_PFA_SET(SPREAD_SLAB, spread_slab) 18262ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 1827544b2c91STejun Heo 1828356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) 1829356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) 1830356e4bffSThomas Gleixner TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) 1831356e4bffSThomas Gleixner 183271368af9SWaiman Long TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) 183371368af9SWaiman Long TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) 183471368af9SWaiman Long TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) 183571368af9SWaiman Long 1836356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1837356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 18389137bb27SThomas Gleixner 18399137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) 18409137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) 18419137bb27SThomas Gleixner TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) 18429137bb27SThomas Gleixner 18439137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 18449137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 18459137bb27SThomas Gleixner 18465eca1c10SIngo Molnar static inline void 1847717a94b5SNeilBrown current_restore_flags(unsigned long orig_flags, unsigned long flags) 1848907aed48SMel Gorman { 1849717a94b5SNeilBrown current->flags &= ~flags; 1850717a94b5SNeilBrown current->flags |= orig_flags & flags; 1851907aed48SMel Gorman } 1852907aed48SMel Gorman 18535eca1c10SIngo Molnar extern int
cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 1854b6e8d40dSWaiman Long extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus); 18551da177e4SLinus Torvalds #ifdef CONFIG_SMP 18565eca1c10SIngo Molnar extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); 18575eca1c10SIngo Molnar extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); 1858b90ca8baSWill Deacon extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node); 1859b90ca8baSWill Deacon extern void release_user_cpus_ptr(struct task_struct *p); 1860234b8ab6SWill Deacon extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); 186107ec77a1SWill Deacon extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); 186207ec77a1SWill Deacon extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); 18631da177e4SLinus Torvalds #else 18645eca1c10SIngo Molnar static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 18651e1b6c51SKOSAKI Motohiro { 18661e1b6c51SKOSAKI Motohiro } 18675eca1c10SIngo Molnar static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 18681da177e4SLinus Torvalds { 186996f874e2SRusty Russell if (!cpumask_test_cpu(0, new_mask)) 18701da177e4SLinus Torvalds return -EINVAL; 18711da177e4SLinus Torvalds return 0; 18721da177e4SLinus Torvalds } 1873b90ca8baSWill Deacon static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) 1874b90ca8baSWill Deacon { 1875b90ca8baSWill Deacon if (src->user_cpus_ptr) 1876b90ca8baSWill Deacon return -EINVAL; 1877b90ca8baSWill Deacon return 0; 1878b90ca8baSWill Deacon } 1879b90ca8baSWill Deacon static inline void release_user_cpus_ptr(struct task_struct *p) 1880b90ca8baSWill Deacon { 1881b90ca8baSWill Deacon WARN_ON(p->user_cpus_ptr); 1882b90ca8baSWill Deacon } 1883234b8ab6SWill Deacon 1884234b8ab6SWill Deacon static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 1885234b8ab6SWill Deacon { 1886234b8ab6SWill Deacon return 0; 1887234b8ab6SWill Deacon } 18881da177e4SLinus Torvalds #endif 1889e0ad9556SRusty Russell 1890fa93384fSDan Carpenter extern int yield_to(struct task_struct *p, bool preempt); 189136c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice); 189236c8b586SIngo Molnar extern int task_prio(const struct task_struct *p); 18935eca1c10SIngo Molnar 1894d0ea0268SDongsheng Yang /** 1895d0ea0268SDongsheng Yang * task_nice - return the nice value of a given task. 1896d0ea0268SDongsheng Yang * @p: the task in question. 1897d0ea0268SDongsheng Yang * 1898d0ea0268SDongsheng Yang * Return: The nice value [ -20 ... 0 ... 19 ]. 
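 *
 * For example, with the macros from <linux/sched/prio.h>:
 * PRIO_TO_NICE(DEFAULT_PRIO) == 0, and a static_prio of DEFAULT_PRIO + 5
 * reads back as nice 5.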
1899d0ea0268SDongsheng Yang */ 1900d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p) 1901d0ea0268SDongsheng Yang { 1902d0ea0268SDongsheng Yang return PRIO_TO_NICE((p)->static_prio); 1903d0ea0268SDongsheng Yang } 19045eca1c10SIngo Molnar 190536c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice); 190636c8b586SIngo Molnar extern int task_curr(const struct task_struct *p); 19071da177e4SLinus Torvalds extern int idle_cpu(int cpu); 1908943d355dSRohit Jain extern int available_idle_cpu(int cpu); 19095eca1c10SIngo Molnar extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); 19105eca1c10SIngo Molnar extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); 19118b700983SPeter Zijlstra extern void sched_set_fifo(struct task_struct *p); 19128b700983SPeter Zijlstra extern void sched_set_fifo_low(struct task_struct *p); 19138b700983SPeter Zijlstra extern void sched_set_normal(struct task_struct *p, int nice); 19145eca1c10SIngo Molnar extern int sched_setattr(struct task_struct *, const struct sched_attr *); 1915794a56ebSJuri Lelli extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); 191636c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu); 19175eca1c10SIngo Molnar 1918c4f30608SPaul E. McKenney /** 1919c4f30608SPaul E. McKenney * is_idle_task - is the specified task an idle task? 1920fa757281SRandy Dunlap * @p: the task in question. 1921e69f6186SYacine Belkadi * 1922e69f6186SYacine Belkadi * Return: 1 if @p is an idle task. 0 otherwise. 1923c4f30608SPaul E. McKenney */ 1924c94a88f3SMarco Elver static __always_inline bool is_idle_task(const struct task_struct *p) 1925c4f30608SPaul E. McKenney { 1926c1de45caSPeter Zijlstra return !!(p->flags & PF_IDLE); 1927c4f30608SPaul E. 
McKenney } 19285eca1c10SIngo Molnar 192936c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu); 1930a458ae2eSPeter Zijlstra extern void ia64_set_curr_task(int cpu, struct task_struct *p); 19311da177e4SLinus Torvalds 19321da177e4SLinus Torvalds void yield(void); 19331da177e4SLinus Torvalds 19341da177e4SLinus Torvalds union thread_union { 19350500871fSDavid Howells #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK 19360500871fSDavid Howells struct task_struct task; 19370500871fSDavid Howells #endif 1938c65eacbeSAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK 19391da177e4SLinus Torvalds struct thread_info thread_info; 1940c65eacbeSAndy Lutomirski #endif 19411da177e4SLinus Torvalds unsigned long stack[THREAD_SIZE/sizeof(long)]; 19421da177e4SLinus Torvalds }; 19431da177e4SLinus Torvalds 19440500871fSDavid Howells #ifndef CONFIG_THREAD_INFO_IN_TASK 19450500871fSDavid Howells extern struct thread_info init_thread_info; 19460500871fSDavid Howells #endif 19470500871fSDavid Howells 19480500871fSDavid Howells extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; 19490500871fSDavid Howells 1950f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK 1951bcf9033eSArd Biesheuvel # define task_thread_info(task) (&(task)->thread_info) 1952f3ac6067SIngo Molnar #elif !defined(__HAVE_THREAD_FUNCTIONS) 1953f3ac6067SIngo Molnar # define task_thread_info(task) ((struct thread_info *)(task)->stack) 1954f3ac6067SIngo Molnar #endif 1955f3ac6067SIngo Molnar 1956198fe21bSPavel Emelyanov /* 1957198fe21bSPavel Emelyanov * find a task by one of its numerical ids 1958198fe21bSPavel Emelyanov * 1959198fe21bSPavel Emelyanov * find_task_by_pid_ns(): 1960198fe21bSPavel Emelyanov * finds a task by its pid in the specified namespace 1961228ebcbeSPavel Emelyanov * find_task_by_vpid(): 1962228ebcbeSPavel Emelyanov * finds a task by its virtual pid 1963198fe21bSPavel Emelyanov * 1964e49859e7SPavel Emelyanov * see also find_vpid() etc in include/linux/pid.h 1965198fe21bSPavel Emelyanov */ 1966198fe21bSPavel Emelyanov 1967228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr); 19685eca1c10SIngo Molnar extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); 1969198fe21bSPavel Emelyanov 19702ee08260SMike Rapoport /* 19712ee08260SMike Rapoport * find a task by its virtual pid and get the task struct 19722ee08260SMike Rapoport */ 19732ee08260SMike Rapoport extern struct task_struct *find_get_task_by_vpid(pid_t nr); 19742ee08260SMike Rapoport 1975b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state); 1976b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk); 19773e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk); 19785eca1c10SIngo Molnar 19791da177e4SLinus Torvalds #ifdef CONFIG_SMP 19801da177e4SLinus Torvalds extern void kick_process(struct task_struct *tsk); 19811da177e4SLinus Torvalds #else 19821da177e4SLinus Torvalds static inline void kick_process(struct task_struct *tsk) { } 19831da177e4SLinus Torvalds #endif 19841da177e4SLinus Torvalds 198582b89778SAdrian Hunter extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); 19865eca1c10SIngo Molnar 198782b89778SAdrian Hunter static inline void set_task_comm(struct task_struct *tsk, const char *from) 198882b89778SAdrian Hunter { 198982b89778SAdrian Hunter __set_task_comm(tsk, from, false); 199082b89778SAdrian Hunter } 19915eca1c10SIngo Molnar 19923756f640SArnd Bergmann extern char 
*__get_task_comm(char *to, size_t len, struct task_struct *tsk); 19933756f640SArnd Bergmann #define get_task_comm(buf, tsk) ({ \ 19943756f640SArnd Bergmann BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ 19953756f640SArnd Bergmann __get_task_comm(buf, sizeof(buf), tsk); \ 19963756f640SArnd Bergmann }) 19971da177e4SLinus Torvalds 19981da177e4SLinus Torvalds #ifdef CONFIG_SMP 19992a0a24ebSThomas Gleixner static __always_inline void scheduler_ipi(void) 20002a0a24ebSThomas Gleixner { 20012a0a24ebSThomas Gleixner /* 20022a0a24ebSThomas Gleixner * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 20032a0a24ebSThomas Gleixner * TIF_NEED_RESCHED remotely (for the first time) will also send 20042a0a24ebSThomas Gleixner * this IPI. 20052a0a24ebSThomas Gleixner */ 20062a0a24ebSThomas Gleixner preempt_fold_need_resched(); 20072a0a24ebSThomas Gleixner } 20082f064a59SPeter Zijlstra extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state); 20091da177e4SLinus Torvalds #else 2010184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { } 20112f064a59SPeter Zijlstra static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) 201285ba2d86SRoland McGrath { 201385ba2d86SRoland McGrath return 1; 201485ba2d86SRoland McGrath } 20151da177e4SLinus Torvalds #endif 20161da177e4SLinus Torvalds 20175eca1c10SIngo Molnar /* 20185eca1c10SIngo Molnar * Set thread flags in other task's structures. 20195eca1c10SIngo Molnar * See asm/thread_info.h for TIF_xxxx flags available: 20201da177e4SLinus Torvalds */ 20211da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 20221da177e4SLinus Torvalds { 2023a1261f54SAl Viro set_ti_thread_flag(task_thread_info(tsk), flag); 20241da177e4SLinus Torvalds } 20251da177e4SLinus Torvalds 20261da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 20271da177e4SLinus Torvalds { 2028a1261f54SAl Viro clear_ti_thread_flag(task_thread_info(tsk), flag); 20291da177e4SLinus Torvalds } 20301da177e4SLinus Torvalds 203193ee37c2SDave Martin static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, 203293ee37c2SDave Martin bool value) 203393ee37c2SDave Martin { 203493ee37c2SDave Martin update_ti_thread_flag(task_thread_info(tsk), flag, value); 203593ee37c2SDave Martin } 203693ee37c2SDave Martin 20371da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 20381da177e4SLinus Torvalds { 2039a1261f54SAl Viro return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 20401da177e4SLinus Torvalds } 20411da177e4SLinus Torvalds 20421da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 20431da177e4SLinus Torvalds { 2044a1261f54SAl Viro return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 20451da177e4SLinus Torvalds } 20461da177e4SLinus Torvalds 20471da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 20481da177e4SLinus Torvalds { 2049a1261f54SAl Viro return test_ti_thread_flag(task_thread_info(tsk), flag); 20501da177e4SLinus Torvalds } 20511da177e4SLinus Torvalds 20521da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk) 20531da177e4SLinus Torvalds { 20541da177e4SLinus Torvalds set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 20551da177e4SLinus Torvalds } 20561da177e4SLinus Torvalds 20571da177e4SLinus Torvalds static inline void 
clear_tsk_need_resched(struct task_struct *tsk) 20581da177e4SLinus Torvalds { 20591da177e4SLinus Torvalds clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 20601da177e4SLinus Torvalds } 20611da177e4SLinus Torvalds 20628ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk) 20638ae121acSGregory Haskins { 20648ae121acSGregory Haskins return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); 20658ae121acSGregory Haskins } 20668ae121acSGregory Haskins 20671da177e4SLinus Torvalds /* 20681da177e4SLinus Torvalds * cond_resched() and cond_resched_lock(): latency reduction via 20691da177e4SLinus Torvalds * explicit rescheduling in places that are safe. The return 20701da177e4SLinus Torvalds * value indicates whether a reschedule was done in fact. 20711da177e4SLinus Torvalds * cond_resched_lock() will drop the spinlock before scheduling, 20721da177e4SLinus Torvalds */ 2073b965f1ddSPeter Zijlstra (Intel) #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) 2074b965f1ddSPeter Zijlstra (Intel) extern int __cond_resched(void); 2075b965f1ddSPeter Zijlstra (Intel) 207699cf983cSMark Rutland #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) 2077b965f1ddSPeter Zijlstra (Intel) 2078b965f1ddSPeter Zijlstra (Intel) DECLARE_STATIC_CALL(cond_resched, __cond_resched); 2079b965f1ddSPeter Zijlstra (Intel) 2080b965f1ddSPeter Zijlstra (Intel) static __always_inline int _cond_resched(void) 2081b965f1ddSPeter Zijlstra (Intel) { 2082ef72661eSPeter Zijlstra return static_call_mod(cond_resched)(); 2083b965f1ddSPeter Zijlstra (Intel) } 2084b965f1ddSPeter Zijlstra (Intel) 208599cf983cSMark Rutland #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) 208699cf983cSMark Rutland extern int dynamic_cond_resched(void); 208799cf983cSMark Rutland 208899cf983cSMark Rutland static __always_inline int _cond_resched(void) 208999cf983cSMark Rutland { 209099cf983cSMark Rutland return dynamic_cond_resched(); 209199cf983cSMark Rutland } 209299cf983cSMark Rutland 209335a773a0SPeter Zijlstra #else 2094b965f1ddSPeter Zijlstra (Intel) 2095b965f1ddSPeter Zijlstra (Intel) static inline int _cond_resched(void) 2096b965f1ddSPeter Zijlstra (Intel) { 2097b965f1ddSPeter Zijlstra (Intel) return __cond_resched(); 2098b965f1ddSPeter Zijlstra (Intel) } 2099b965f1ddSPeter Zijlstra (Intel) 2100b965f1ddSPeter Zijlstra (Intel) #endif /* CONFIG_PREEMPT_DYNAMIC */ 2101b965f1ddSPeter Zijlstra (Intel) 2102b965f1ddSPeter Zijlstra (Intel) #else 2103b965f1ddSPeter Zijlstra (Intel) 210435a773a0SPeter Zijlstra static inline int _cond_resched(void) { return 0; } 2105b965f1ddSPeter Zijlstra (Intel) 2106b965f1ddSPeter Zijlstra (Intel) #endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */ 21076f80bd98SFrederic Weisbecker 2108613afbf8SFrederic Weisbecker #define cond_resched() ({ \ 2109874f670eSThomas Gleixner __might_resched(__FILE__, __LINE__, 0); \ 2110613afbf8SFrederic Weisbecker _cond_resched(); \ 2111613afbf8SFrederic Weisbecker }) 21126f80bd98SFrederic Weisbecker 2113613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock); 2114f3d4b4b1SBen Gardon extern int __cond_resched_rwlock_read(rwlock_t *lock); 2115f3d4b4b1SBen Gardon extern int __cond_resched_rwlock_write(rwlock_t *lock); 2116613afbf8SFrederic Weisbecker 211750e081b9SThomas Gleixner #define MIGHT_RESCHED_RCU_SHIFT 8 211850e081b9SThomas Gleixner #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1) 211950e081b9SThomas Gleixner 
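/*
 * Illustrative sketch (not upstream code; assumes process context and
 * hypothetical my_lock, my_list and process_one()): long loops call
 * cond_resched() to keep latency low on !CONFIG_PREEMPTION kernels, and
 * the *_lock() variants while a lock is held:
 *
 *	spin_lock(&my_lock);
 *	while (!list_empty(&my_list)) {
 *		process_one(&my_list);
 *		cond_resched_lock(&my_lock);	// may drop and retake my_lock
 *	}
 *	spin_unlock(&my_lock);
 */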
211750e081b9SThomas Gleixner #define MIGHT_RESCHED_RCU_SHIFT		8
211850e081b9SThomas Gleixner #define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
211950e081b9SThomas Gleixner 
21203e9cc688SThomas Gleixner #ifndef CONFIG_PREEMPT_RT
21213e9cc688SThomas Gleixner /*
21223e9cc688SThomas Gleixner  * Non-RT kernels have an elevated preempt count due to the held lock,
21233e9cc688SThomas Gleixner  * but are not allowed to be inside an RCU read-side critical section.
21243e9cc688SThomas Gleixner  */
21253e9cc688SThomas Gleixner # define PREEMPT_LOCK_RESCHED_OFFSETS	PREEMPT_LOCK_OFFSET
21263e9cc688SThomas Gleixner #else
21273e9cc688SThomas Gleixner /*
21283e9cc688SThomas Gleixner  * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
21293e9cc688SThomas Gleixner  * cond_resched*lock() has to take that into account because it checks for
21303e9cc688SThomas Gleixner  * preempt_count() and rcu_preempt_depth().
21313e9cc688SThomas Gleixner  */
21323e9cc688SThomas Gleixner # define PREEMPT_LOCK_RESCHED_OFFSETS	\
21333e9cc688SThomas Gleixner 	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
21343e9cc688SThomas Gleixner #endif
21353e9cc688SThomas Gleixner 
2136613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({						\
21373e9cc688SThomas Gleixner 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2138613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);						\
2139613afbf8SFrederic Weisbecker })
2140613afbf8SFrederic Weisbecker 
2141f3d4b4b1SBen Gardon #define cond_resched_rwlock_read(lock) ({					\
21423e9cc688SThomas Gleixner 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2143f3d4b4b1SBen Gardon 	__cond_resched_rwlock_read(lock);					\
2144f3d4b4b1SBen Gardon })
2145f3d4b4b1SBen Gardon 
2146f3d4b4b1SBen Gardon #define cond_resched_rwlock_write(lock) ({					\
21473e9cc688SThomas Gleixner 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2148f3d4b4b1SBen Gardon 	__cond_resched_rwlock_write(lock);					\
2149f3d4b4b1SBen Gardon })
2150f3d4b4b1SBen Gardon 
2151f6f3c437SSimon Horman static inline void cond_resched_rcu(void)
2152f6f3c437SSimon Horman {
2153f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2154f6f3c437SSimon Horman 	rcu_read_unlock();
2155f6f3c437SSimon Horman 	cond_resched();
2156f6f3c437SSimon Horman 	rcu_read_lock();
2157f6f3c437SSimon Horman #endif
2158f6f3c437SSimon Horman }
2159f6f3c437SSimon Horman 
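/*
 * Usage sketch (illustrative only): cond_resched_lock() drops @lock,
 * reschedules if needed and reacquires the lock, so any state derived
 * from the protected data must be revalidated after a non-zero return.
 * The lock and the helpers below are hypothetical.
 *
 *	spin_lock(&foo_lock);
 *	for (i = 0; i < nr_foo; i++) {
 *		scrub_foo(i);
 *		if (cond_resched_lock(&foo_lock))
 *			i = revalidate_cursor(i);
 *	}
 *	spin_unlock(&foo_lock);
 */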
2160cfe43f47SValentin Schneider #ifdef CONFIG_PREEMPT_DYNAMIC
2161cfe43f47SValentin Schneider 
2162cfe43f47SValentin Schneider extern bool preempt_model_none(void);
2163cfe43f47SValentin Schneider extern bool preempt_model_voluntary(void);
2164cfe43f47SValentin Schneider extern bool preempt_model_full(void);
2165cfe43f47SValentin Schneider 
2166cfe43f47SValentin Schneider #else
2167cfe43f47SValentin Schneider 
2168cfe43f47SValentin Schneider static inline bool preempt_model_none(void)
2169cfe43f47SValentin Schneider {
2170cfe43f47SValentin Schneider 	return IS_ENABLED(CONFIG_PREEMPT_NONE);
2171cfe43f47SValentin Schneider }
2172cfe43f47SValentin Schneider static inline bool preempt_model_voluntary(void)
2173cfe43f47SValentin Schneider {
2174cfe43f47SValentin Schneider 	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
2175cfe43f47SValentin Schneider }
2176cfe43f47SValentin Schneider static inline bool preempt_model_full(void)
2177cfe43f47SValentin Schneider {
2178cfe43f47SValentin Schneider 	return IS_ENABLED(CONFIG_PREEMPT);
2179cfe43f47SValentin Schneider }
2180cfe43f47SValentin Schneider 
2181cfe43f47SValentin Schneider #endif
2182cfe43f47SValentin Schneider 
2183cfe43f47SValentin Schneider static inline bool preempt_model_rt(void)
2184cfe43f47SValentin Schneider {
2185cfe43f47SValentin Schneider 	return IS_ENABLED(CONFIG_PREEMPT_RT);
2186cfe43f47SValentin Schneider }
2187cfe43f47SValentin Schneider 
2188cfe43f47SValentin Schneider /*
2189cfe43f47SValentin Schneider  * Does the preemption model allow non-cooperative preemption?
2190cfe43f47SValentin Schneider  *
2191cfe43f47SValentin Schneider  * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
2192cfe43f47SValentin Schneider  * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
2193cfe43f47SValentin Schneider  * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
2194cfe43f47SValentin Schneider  * PREEMPT_NONE model.
2195cfe43f47SValentin Schneider  */
2196cfe43f47SValentin Schneider static inline bool preempt_model_preemptible(void)
2197cfe43f47SValentin Schneider {
2198cfe43f47SValentin Schneider 	return preempt_model_full() || preempt_model_rt();
2199cfe43f47SValentin Schneider }
2200cfe43f47SValentin Schneider 
22011da177e4SLinus Torvalds /*
22021da177e4SLinus Torvalds  * Does a critical section need to be broken because another
2203c1a280b6SThomas Gleixner  * task is waiting? (Technically this does not depend on
220495c354feSNick Piggin  * CONFIG_PREEMPTION, but there is a general need for low latency.)
22051da177e4SLinus Torvalds  */
220695c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
22071da177e4SLinus Torvalds {
2208c1a280b6SThomas Gleixner #ifdef CONFIG_PREEMPTION
220995c354feSNick Piggin 	return spin_is_contended(lock);
221095c354feSNick Piggin #else
22111da177e4SLinus Torvalds 	return 0;
221295c354feSNick Piggin #endif
22131da177e4SLinus Torvalds }
22141da177e4SLinus Torvalds 
2215a09a689aSBen Gardon /*
2216a09a689aSBen Gardon  * Check if an rwlock is contended.
2217a09a689aSBen Gardon  * Returns non-zero if there is another task waiting on the rwlock.
2218a09a689aSBen Gardon  * Returns zero if the lock is not contended or the system / underlying
2219a09a689aSBen Gardon  * rwlock implementation does not support contention detection.
2220a09a689aSBen Gardon  * Technically this does not depend on CONFIG_PREEMPTION, but there is
2221a09a689aSBen Gardon  * a general need for low latency.
2222a09a689aSBen Gardon  */
2223a09a689aSBen Gardon static inline int rwlock_needbreak(rwlock_t *lock)
2224a09a689aSBen Gardon {
2225a09a689aSBen Gardon #ifdef CONFIG_PREEMPTION
2226a09a689aSBen Gardon 	return rwlock_is_contended(lock);
2227a09a689aSBen Gardon #else
2228a09a689aSBen Gardon 	return 0;
2229a09a689aSBen Gardon #endif
2230a09a689aSBen Gardon }
2231a09a689aSBen Gardon 
223275f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
223375f93fedSPeter Zijlstra {
223475f93fedSPeter Zijlstra 	return unlikely(tif_need_resched());
223575f93fedSPeter Zijlstra }
223675f93fedSPeter Zijlstra 
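/*
 * Usage sketch (illustrative only): the classic lock-break pattern that
 * spin_needbreak() and need_resched() are meant for: briefly drop a
 * contended lock so that waiters, or a pending reschedule, can make
 * progress. Everything except the two predicates is hypothetical.
 *
 *	spin_lock(&foo_lock);
 *	while (have_more_work()) {
 *		do_small_chunk();
 *		if (spin_needbreak(&foo_lock) || need_resched()) {
 *			spin_unlock(&foo_lock);
 *			cond_resched();
 *			spin_lock(&foo_lock);
 *		}
 *	}
 *	spin_unlock(&foo_lock);
 */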
2237ee761f62SThomas Gleixner /*
22381da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
22391da177e4SLinus Torvalds  */
22401da177e4SLinus Torvalds #ifdef CONFIG_SMP
22411da177e4SLinus Torvalds 
22421da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
22431da177e4SLinus Torvalds {
2244c546951dSAndrea Parri 	return READ_ONCE(task_thread_info(p)->cpu);
22451da177e4SLinus Torvalds }
22461da177e4SLinus Torvalds 
2247c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
22481da177e4SLinus Torvalds 
22491da177e4SLinus Torvalds #else
22501da177e4SLinus Torvalds 
22511da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
22521da177e4SLinus Torvalds {
22531da177e4SLinus Torvalds 	return 0;
22541da177e4SLinus Torvalds }
22551da177e4SLinus Torvalds 
22561da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
22571da177e4SLinus Torvalds {
22581da177e4SLinus Torvalds }
22591da177e4SLinus Torvalds 
22601da177e4SLinus Torvalds #endif /* CONFIG_SMP */
22611da177e4SLinus Torvalds 
2262a1dfb631SMarcelo Tosatti extern bool sched_task_on_rq(struct task_struct *p);
226342a20f86SKees Cook extern unsigned long get_wchan(struct task_struct *p);
2264e386b672SPaul E. McKenney extern struct task_struct *cpu_curr_snapshot(int cpu);
2265a1dfb631SMarcelo Tosatti 
2266d9345c65SPan Xinhui /*
2267d9345c65SPan Xinhui  * In order to reduce various lock-holder preemption latencies, provide
2268d9345c65SPan Xinhui  * an interface to see if a vCPU is currently running or not.
2269d9345c65SPan Xinhui  *
2270d9345c65SPan Xinhui  * This allows us to terminate optimistic spin loops and block, analogous to
2271d9345c65SPan Xinhui  * the native optimistic spin heuristic of testing if the lock owner task is
2272d9345c65SPan Xinhui  * running or not.
2273d9345c65SPan Xinhui  */
2274d9345c65SPan Xinhui #ifndef vcpu_is_preempted
227542fd8baaSQian Cai static inline bool vcpu_is_preempted(int cpu)
227642fd8baaSQian Cai {
227742fd8baaSQian Cai 	return false;
227842fd8baaSQian Cai }
2279d9345c65SPan Xinhui #endif
2280d9345c65SPan Xinhui 
228196f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
228296f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
22835c45bf27SSiddha, Suresh B 
228482455257SDave Hansen #ifndef TASK_SIZE_OF
228582455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
228682455257SDave Hansen #endif
228782455257SDave Hansen 
2288a5418be9SViresh Kumar #ifdef CONFIG_SMP
2289c0bed69dSKefeng Wang static inline bool owner_on_cpu(struct task_struct *owner)
2290c0bed69dSKefeng Wang {
2291c0bed69dSKefeng Wang 	/*
2292c0bed69dSKefeng Wang 	 * Due to the lock-holder preemption issue, we skip spinning if the
2293c0bed69dSKefeng Wang 	 * task is not on a CPU or its CPU has been preempted.
2294c0bed69dSKefeng Wang 	 */
22954cf75fd4SMarco Elver 	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2296c0bed69dSKefeng Wang }
2297c0bed69dSKefeng Wang 
2298a5418be9SViresh Kumar /* Returns effective CPU energy utilization, as seen by the scheduler */
2299bb447999SDietmar Eggemann unsigned long sched_cpu_util(int cpu);
2300a5418be9SViresh Kumar #endif /* CONFIG_SMP */
2301a5418be9SViresh Kumar 
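/*
 * Usage sketch (illustrative only): an optimistic spin loop of the kind
 * the mutex/rwsem slow paths implement. Spinning only pays off while the
 * owner is genuinely running; owner_on_cpu() folds in vcpu_is_preempted()
 * so a guest stops spinning once the owner's vCPU has been preempted by
 * the hypervisor. The lock structure and helpers here are hypothetical.
 *
 *	struct task_struct *owner = READ_ONCE(lock->owner);
 *
 *	while (owner && owner_on_cpu(owner)) {
 *		if (try_acquire(lock))
 *			return true;
 *		cpu_relax();
 *	}
 *	return false;	(owner is blocked or preempted: sleep instead)
 */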
2302d7822b1eSMathieu Desnoyers #ifdef CONFIG_RSEQ
2303d7822b1eSMathieu Desnoyers 
2304d7822b1eSMathieu Desnoyers /*
2305d7822b1eSMathieu Desnoyers  * Map the event mask on the user-space ABI enum rseq_cs_flags
2306d7822b1eSMathieu Desnoyers  * for direct mask checks.
2307d7822b1eSMathieu Desnoyers  */
2308d7822b1eSMathieu Desnoyers enum rseq_event_mask_bits {
2309d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2310d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2311d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2312d7822b1eSMathieu Desnoyers };
2313d7822b1eSMathieu Desnoyers 
2314d7822b1eSMathieu Desnoyers enum rseq_event_mask {
2315d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
2316d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
2317d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
2318d7822b1eSMathieu Desnoyers };
2319d7822b1eSMathieu Desnoyers 
2320d7822b1eSMathieu Desnoyers static inline void rseq_set_notify_resume(struct task_struct *t)
2321d7822b1eSMathieu Desnoyers {
2322d7822b1eSMathieu Desnoyers 	if (t->rseq)
2323d7822b1eSMathieu Desnoyers 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2324d7822b1eSMathieu Desnoyers }
2325d7822b1eSMathieu Desnoyers 
2326784e0300SWill Deacon void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2327d7822b1eSMathieu Desnoyers 
2328784e0300SWill Deacon static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2329784e0300SWill Deacon 					     struct pt_regs *regs)
2330d7822b1eSMathieu Desnoyers {
2331d7822b1eSMathieu Desnoyers 	if (current->rseq)
2332784e0300SWill Deacon 		__rseq_handle_notify_resume(ksig, regs);
2333d7822b1eSMathieu Desnoyers }
2334d7822b1eSMathieu Desnoyers 
2335784e0300SWill Deacon static inline void rseq_signal_deliver(struct ksignal *ksig,
2336784e0300SWill Deacon 				       struct pt_regs *regs)
2337d7822b1eSMathieu Desnoyers {
2338d7822b1eSMathieu Desnoyers 	preempt_disable();
2339d7822b1eSMathieu Desnoyers 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2340d7822b1eSMathieu Desnoyers 	preempt_enable();
2341784e0300SWill Deacon 	rseq_handle_notify_resume(ksig, regs);
2342d7822b1eSMathieu Desnoyers }
2343d7822b1eSMathieu Desnoyers 
2344d7822b1eSMathieu Desnoyers /* rseq_preempt() requires preemption to be disabled. */
2345d7822b1eSMathieu Desnoyers static inline void rseq_preempt(struct task_struct *t)
2346d7822b1eSMathieu Desnoyers {
2347d7822b1eSMathieu Desnoyers 	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2348d7822b1eSMathieu Desnoyers 	rseq_set_notify_resume(t);
2349d7822b1eSMathieu Desnoyers }
2350d7822b1eSMathieu Desnoyers 
2351d7822b1eSMathieu Desnoyers /* rseq_migrate() requires preemption to be disabled. */
2352d7822b1eSMathieu Desnoyers static inline void rseq_migrate(struct task_struct *t)
2353d7822b1eSMathieu Desnoyers {
2354d7822b1eSMathieu Desnoyers 	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2355d7822b1eSMathieu Desnoyers 	rseq_set_notify_resume(t);
2356d7822b1eSMathieu Desnoyers }
2357d7822b1eSMathieu Desnoyers 
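/*
 * Flow sketch (condensed, for orientation): a scheduler event latches the
 * matching bit in t->rseq_event_mask and raises TIF_NOTIFY_RESUME via
 * rseq_set_notify_resume(); on the way back to user space the
 * notify-resume path consumes the mask and aborts any interrupted rseq
 * critical section.
 *
 *	(context switch path, preemption disabled)
 *	rseq_preempt(prev);
 *	...
 *	(exit to user space, TIF_NOTIFY_RESUME set)
 *	rseq_handle_notify_resume(NULL, regs);
 */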
2358d7822b1eSMathieu Desnoyers /*
2359d7822b1eSMathieu Desnoyers  * If the parent process has a registered restartable sequences area, the
2360463f550fSMathieu Desnoyers  * child inherits it. Unregister rseq for a clone with CLONE_VM set.
2361d7822b1eSMathieu Desnoyers  */
2362d7822b1eSMathieu Desnoyers static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2363d7822b1eSMathieu Desnoyers {
2364463f550fSMathieu Desnoyers 	if (clone_flags & CLONE_VM) {
2365d7822b1eSMathieu Desnoyers 		t->rseq = NULL;
2366ee3e3ac0SMathieu Desnoyers 		t->rseq_len = 0;
2367d7822b1eSMathieu Desnoyers 		t->rseq_sig = 0;
2368d7822b1eSMathieu Desnoyers 		t->rseq_event_mask = 0;
2369d7822b1eSMathieu Desnoyers 	} else {
2370d7822b1eSMathieu Desnoyers 		t->rseq = current->rseq;
2371ee3e3ac0SMathieu Desnoyers 		t->rseq_len = current->rseq_len;
2372d7822b1eSMathieu Desnoyers 		t->rseq_sig = current->rseq_sig;
2373d7822b1eSMathieu Desnoyers 		t->rseq_event_mask = current->rseq_event_mask;
2374d7822b1eSMathieu Desnoyers 	}
2375d7822b1eSMathieu Desnoyers }
2376d7822b1eSMathieu Desnoyers 
2377d7822b1eSMathieu Desnoyers static inline void rseq_execve(struct task_struct *t)
2378d7822b1eSMathieu Desnoyers {
2379d7822b1eSMathieu Desnoyers 	t->rseq = NULL;
2380ee3e3ac0SMathieu Desnoyers 	t->rseq_len = 0;
2381d7822b1eSMathieu Desnoyers 	t->rseq_sig = 0;
2382d7822b1eSMathieu Desnoyers 	t->rseq_event_mask = 0;
2383d7822b1eSMathieu Desnoyers }
2384d7822b1eSMathieu Desnoyers 
2385d7822b1eSMathieu Desnoyers #else
2386d7822b1eSMathieu Desnoyers 
2387d7822b1eSMathieu Desnoyers static inline void rseq_set_notify_resume(struct task_struct *t)
2388d7822b1eSMathieu Desnoyers {
2389d7822b1eSMathieu Desnoyers }
2390784e0300SWill Deacon static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2391784e0300SWill Deacon 					     struct pt_regs *regs)
2392d7822b1eSMathieu Desnoyers {
2393d7822b1eSMathieu Desnoyers }
2394784e0300SWill Deacon static inline void rseq_signal_deliver(struct ksignal *ksig,
2395784e0300SWill Deacon 				       struct pt_regs *regs)
2396d7822b1eSMathieu Desnoyers {
2397d7822b1eSMathieu Desnoyers }
2398d7822b1eSMathieu Desnoyers static inline void rseq_preempt(struct task_struct *t)
2399d7822b1eSMathieu Desnoyers {
2400d7822b1eSMathieu Desnoyers }
2401d7822b1eSMathieu Desnoyers static inline void rseq_migrate(struct task_struct *t)
2402d7822b1eSMathieu Desnoyers {
2403d7822b1eSMathieu Desnoyers }
2404d7822b1eSMathieu Desnoyers static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2405d7822b1eSMathieu Desnoyers {
2406d7822b1eSMathieu Desnoyers }
2407d7822b1eSMathieu Desnoyers static inline void rseq_execve(struct task_struct *t)
2408d7822b1eSMathieu Desnoyers {
2409d7822b1eSMathieu Desnoyers }
2410d7822b1eSMathieu Desnoyers 
2411d7822b1eSMathieu Desnoyers #endif
2412d7822b1eSMathieu Desnoyers 
2413d7822b1eSMathieu Desnoyers #ifdef CONFIG_DEBUG_RSEQ
2414d7822b1eSMathieu Desnoyers 
2415d7822b1eSMathieu Desnoyers void rseq_syscall(struct pt_regs *regs);
2416d7822b1eSMathieu Desnoyers 
2417d7822b1eSMathieu Desnoyers #else
2418d7822b1eSMathieu Desnoyers 
2419d7822b1eSMathieu Desnoyers static inline void rseq_syscall(struct pt_regs *regs)
2420d7822b1eSMathieu Desnoyers {
2421d7822b1eSMathieu Desnoyers }
2422d7822b1eSMathieu Desnoyers 
2423d7822b1eSMathieu Desnoyers #endif
2424d7822b1eSMathieu Desnoyers 
24256e33cad0SPeter Zijlstra #ifdef CONFIG_SCHED_CORE
24266e33cad0SPeter Zijlstra extern void sched_core_free(struct task_struct *tsk);
242785dd3f61SPeter Zijlstra extern void sched_core_fork(struct task_struct *p);
24287ac592aaSChris Hyser extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
24297ac592aaSChris Hyser 				unsigned long uaddr);
24306e33cad0SPeter Zijlstra #else
24316e33cad0SPeter Zijlstra static inline void sched_core_free(struct task_struct *tsk) { }
243285dd3f61SPeter Zijlstra static inline void sched_core_fork(struct task_struct *p) { }
24336e33cad0SPeter Zijlstra #endif
24346e33cad0SPeter Zijlstra 
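/*
 * Usage sketch (illustrative only): sched_core_share_pid() backs the
 * PR_SCHED_CORE prctl(), with which user space creates a core-scheduling
 * cookie and extends it to tasks that are allowed to share an SMT core;
 * worker_pid is hypothetical.
 *
 *	prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
 *	      PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0);
 *	prctl(PR_SCHED_CORE, PR_SCHED_CORE_SHARE_TO, worker_pid,
 *	      PR_SCHED_CORE_SCOPE_THREAD, 0);
 */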
2435d664e399SThomas Gleixner extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2436d664e399SThomas Gleixner 
24371da177e4SLinus Torvalds #endif