/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
#include <linux/kcsan.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED			0x0040
#define TASK_DEAD			0x0080
#define TASK_WAKEKILL			0x0100
#define TASK_WAKING			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
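/*
 * Illustrative sketch, not part of this header: composed states such as
 * TASK_KILLABLE are what callers hand to set_current_state(). E.g. a
 * sleep that only an explicit wakeup or a fatal signal can end:
 *
 *	set_current_state(TASK_KILLABLE);
 *	schedule();
 *	if (fatal_signal_pending(current))
 *		(we were killed rather than woken by the event)
 *
 * fatal_signal_pending() is assumed from <linux/sched/signal.h>.
 */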
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state)				\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value)			\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)

#define set_current_state(state_value)				\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->task_state_change = _THIS_IP_;			\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state() executes a full memory barrier before accessing the
 * task state.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store.
 * Losing that store is not a problem either because that will result in one
 * extra go around the loop and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)				\
	current->state = (state_value)

#define set_current_state(state_value)					\
	smp_store_mb(current->state, (state_value))

/*
 * set_special_state() should be used for those states when the blocking task
 * cannot use the regular condition-based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

#endif

/* Task command name length: */
#define TASK_COMM_LEN			16

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
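/*
 * Illustrative sketch, not part of this header: a bounded interruptible
 * wait built from the timeout helpers above. schedule_timeout() expects
 * the caller to have set the task state first and returns the remaining
 * jiffies (0 on timeout); msecs_to_jiffies() is assumed from
 * <linux/jiffies.h>.
 *
 *	long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (!remaining)
 *		(timed out)
 *	else if (signal_pending(current))
 *		(woken early by a signal)
 */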
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task is idle */
	VTIME_IDLE,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs as a guest in a CPU with VTIME active: */
	VTIME_GUEST,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	unsigned int		cpu;
	u64			utime;
	u64			stime;
	u64			gtime;
};

/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

/* Increase resolution of cpu_capacity calculations */
# define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)
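/*
 * Illustrative sketch, not part of this header: with a shift of 10 a
 * value carries 10 fractional bits, so "1.0" is stored as 1024 and 50%
 * of full capacity as 512. A product of two fixed point values must be
 * shifted back down once:
 *
 *	unsigned long half = SCHED_CAPACITY_SCALE >> 1;
 *	unsigned long prod = (a * b) >> SCHED_FIXEDPOINT_SHIFT;
 */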
struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

/**
 * struct util_est - Estimated utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 *            utilization of a task
 *
 * Support data structure to track an Exponential Weighted Moving Average
 * (EWMA) of a FAIR task's utilization. New samples are added to the moving
 * average each time a task completes an activation. The sample weight is
 * chosen so that the EWMA will be relatively insensitive to transient changes
 * to the task's workload.
 *
 * The enqueued attribute has a slightly different meaning for tasks and cpus:
 * - task:   the task's util_avg at last task dequeue time
 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
 * Thus, the util_est.enqueued of a task represents the contribution to the
 * estimated utilization of the CPU where that task is currently enqueued.
 *
 * Only for tasks do we track a moving average of the past instantaneous
 * estimated utilization. This allows us to absorb sporadic drops in the
 * utilization of an otherwise almost periodic task.
 */
struct util_est {
	unsigned int			enqueued;
	unsigned int			ewma;
#define UTIL_EST_WEIGHT_SHIFT		2
} __attribute__((__aligned__(sizeof(u64))));
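/*
 * Illustrative sketch; the real update lives in kernel/sched/fair.c and
 * handles more cases. With UTIL_EST_WEIGHT_SHIFT == 2 a new sample gets
 * a weight of 1/4, i.e. the EWMA folds a sample in roughly as:
 *
 *	ewma += (last_sample - ewma) >> UTIL_EST_WEIGHT_SHIFT;
 *
 * which is ewma = 0.75 * ewma + 0.25 * last_sample.
 */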
/*
 * The load/runnable/util_avg accumulates an infinite geometric series
 * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * [runnable_avg definition]
 *
 *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where runnable% is the time ratio that a sched_entity is runnable and
 * running% the time ratio that a sched_entity is running.
 *
 * For cfs_rq, they are the aggregated values of all runnable and blocked
 * sched_entities.
 *
 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
 * capacity scaling. The scaling is done through the rq_clock_pelt that is used
 * for computing those signals (see update_rq_clock_pelt()).
 *
 * N.B., the above ratios (runnable% and running%) themselves are in the
 * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
 * to as large a range as necessary. This is for example reflected by
 * util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u64				runnable_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			runnable_avg;
	unsigned long			util_avg;
	struct util_est			util_est;
} ____cacheline_aligned;
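/*
 * Illustrative note; the constants live in kernel/sched/pelt.c. The
 * geometric series above is accumulated in 1024us periods with a decay
 * factor y chosen such that y^32 = 0.5, i.e. each period does roughly:
 *
 *	sum = sum * y + contribution
 *
 * so a contribution loses half its weight after about 32ms.
 */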
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

	struct sched_statistics		statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
	/* cached value of my_q->h_nr_running */
	unsigned long			runnable_weight;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;
struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period		*/
	u64				dl_density;	/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */
	unsigned int			dl_throttled      : 1;
	unsigned int			dl_boosted        : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;
};
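/*
 * Illustrative sketch of the userspace side, not part of this header:
 * dl_runtime/dl_deadline/dl_period above mirror a sched_attr passed to
 * the sched_setattr() syscall, subject to runtime <= deadline <= period.
 * Hypothetical numbers, all in nanoseconds:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	=  30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 */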
#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias) */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT

/*
 * Utilization clamp for a scheduling entity
 * @value:		clamp value "assigned" to a se
 * @bucket_id:		bucket index corresponding to the "assigned" value
 * @active:		the se is currently refcounted in a rq's bucket
 * @user_defined:	the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value
 * which is pre-computed and stored to avoid expensive integer divisions from
 * the fast path.
 *
 * The active bit is set whenever a task has got an "effective" value assigned,
 * which can be different from the clamp value "requested" from user-space.
 * This makes it possible to know that a task is refcounted in the rq's bucket
 * corresponding to the "effective" bucket_id.
 *
 * The user_defined bit is set whenever a task has got a task-specific clamp
 * value requested from userspace, i.e. the system defaults apply to this task
 * just as a restriction. This allows relaxing default clamps when a less
 * restrictive task-specific value has been requested, thus allowing a "nice"
 * semantic to be implemented. For example, a task running with a 20%
 * default boost can still drop its own boosting to 0%.
 */
struct uclamp_se {
	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
	unsigned int active		: 1;
	unsigned int user_defined	: 1;
};
#endif /* CONFIG_UCLAMP_TASK */
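/*
 * Illustrative note; see uclamp_bucket_id() in kernel/sched/core.c. The
 * pre-computed bucket index is essentially the clamp value divided by
 * the bucket size, capped to the last bucket:
 *
 *	delta     = SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS;
 *	bucket_id = min(value / delta, UCLAMP_BUCKETS - 1);
 */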
union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_hint; /* Hint for performance. */
		u8			need_mb; /* Readers need smp_mb(). */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void				*stack;
	refcount_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	int				on_cpu;
	struct __call_single_node	wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int			cpu;
#endif
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	/*
	 * recent_used_cpu is initially set as the last CPU used by a task
	 * that wakes affine another task. Waker/wakee relationships can
	 * push tasks around a CPU where each wakeup moves to the next one.
	 * Tracking a recently used CPU allows a quick search for a recently
	 * used CPU that may be idle.
	 */
	int				recent_used_cpu;
	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
	struct sched_dl_entity		dl;

#ifdef CONFIG_UCLAMP_TASK
	/* Clamp values requested for a scheduling entity */
	struct uclamp_se		uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a scheduling entity */
	struct uclamp_se		uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	int				nr_cpus_allowed;
	const cpumask_t			*cpus_ptr;
	cpumask_t			cpus_mask;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_TRACE_RCU
	int				trc_reader_nesting;
	int				trc_ipi_to_cpu;
	union rcu_special		trc_reader_special;
	bool				trc_reader_checked;
	struct list_head		trc_holdout_list;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	/* Per-thread vma caching: */
	struct vmacache			vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat		rss_stat;
#endif
	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
	unsigned			sched_remote_wakeup:1;
#ifdef CONFIG_PSI
	unsigned			sched_psi_wake_requeue:1;
#endif

	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
	/* task is frozen/stopped (used by the cgroup freezer) */
	unsigned			frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	unsigned			use_memdelay:1;
#endif
#ifdef CONFIG_PSI
	/* Stalled due to lack of memory */
	unsigned			in_memstall:1;
#endif

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid			*thread_pid;
	struct hlist_node		pid_links[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				start_boottime;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
	struct posix_cputimers		posix_cputimers;

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

#ifdef CONFIG_KEYS
	/* Cached requested key. */
	struct key			*cached_requested_key;
#endif

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized by setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];
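	/*
	 * Illustrative sketch, not part of this header: read the name via
	 * the helper rather than dereferencing ->comm directly, since the
	 * helper takes task_lock() and so cannot race with a comm update:
	 *
	 *	char buf[TASK_COMM_LEN];
	 *
	 *	get_task_comm(buf, tsk);
	 */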

	struct nameidata		*nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long			last_switch_count;
	unsigned long			last_switch_time;
#endif
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct __rcu	*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
	struct audit_context		*audit_context;
#endif
	kuid_t				loginuid;
	unsigned int			sessionid;
#endif
	struct seccomp			seccomp;

	/* Thread group tracking: */
	u64				parent_exec_id;
	u64				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached		pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	int				non_block_count;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	struct irqtrace_events		irqtrace;
	unsigned int			hardirq_threaded;
	u64				hardirq_chain_key;
	int				softirqs_enabled;
	int				softirq_context;
	int				irq_config;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_UBSAN
	unsigned int			in_ubsan;
#endif

	/* Journalling filesystem info: */
	void				*journal_info;

	/* Stacked block device info: */
	struct bio_list			*bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug			*plug;
#endif

	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;

	struct io_context		*io_context;

#ifdef CONFIG_COMPACTION
	struct capture_control		*capture_control;
#endif
	/* Ptrace state: */
	unsigned long			ptrace_message;
	kernel_siginfo_t		*last_siginfo;

	struct task_io_accounting	ioac;
#ifdef CONFIG_PSI
	/* Pressure stall state */
	unsigned int			psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64				acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64				acct_vm_mem1;
	/* stime + utime since last update: */
	u64				acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t			mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu		*cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head		cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
	u32				closid;
	u32				rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user	*robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head		pi_state_list;
	struct futex_pi_state		*pi_state_cache;
	struct mutex			futex_exit_mutex;
	unsigned int			futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
	struct mutex			perf_event_mutex;
	struct list_head		perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long			preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy		*mempolicy;
	short				il_prev;
	short				pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int				numa_scan_seq;
	unsigned int			numa_scan_period;
	unsigned int			numa_scan_period_max;
	int				numa_preferred_nid;
	unsigned long			numa_migrate_retry;
	/* Migration stamp: */
	u64				node_stamp;
	u64				last_task_numa_placement;
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;
Gorman 1096cb361d8cSJann Horn /* 1097cb361d8cSJann Horn * This pointer is only modified for current in syscall and 1098cb361d8cSJann Horn * pagefault context (and for tasks being destroyed), so it can be read 1099cb361d8cSJann Horn * from any of the following contexts: 1100cb361d8cSJann Horn * - RCU read-side critical section 1101cb361d8cSJann Horn * - current->numa_group from everywhere 1102cb361d8cSJann Horn * - task's runqueue locked, task not running 1103cb361d8cSJann Horn */ 1104cb361d8cSJann Horn struct numa_group __rcu *numa_group; 11058c8a743cSPeter Zijlstra 1106745d6147SMel Gorman /* 110744dba3d5SIulia Manda * numa_faults is an array split into four regions: 110844dba3d5SIulia Manda * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer 110944dba3d5SIulia Manda * in this precise order. 111044dba3d5SIulia Manda * 111144dba3d5SIulia Manda * faults_memory: Exponential decaying average of faults on a per-node 111244dba3d5SIulia Manda * basis. Scheduling placement decisions are made based on these 111344dba3d5SIulia Manda * counts. The values remain static for the duration of a PTE scan. 111444dba3d5SIulia Manda * faults_cpu: Track the nodes the process was running on when a NUMA 111544dba3d5SIulia Manda * hinting fault was incurred. 111644dba3d5SIulia Manda * faults_memory_buffer and faults_cpu_buffer: Record faults per node 111744dba3d5SIulia Manda * during the current scan window. When the scan completes, the counts 111844dba3d5SIulia Manda * in faults_memory and faults_cpu decay and these values are copied. 1119745d6147SMel Gorman */ 112044dba3d5SIulia Manda unsigned long *numa_faults; 112183e1d2cdSMel Gorman unsigned long total_numa_faults; 1122745d6147SMel Gorman 1123745d6147SMel Gorman /* 112404bb2f94SRik van Riel * numa_faults_locality tracks if faults recorded during the last 1125074c2381SMel Gorman * scan window were remote/local or failed to migrate. The task scan 1126074c2381SMel Gorman * period is adapted based on the locality of the faults with different 1127074c2381SMel Gorman * weights depending on whether they were shared or private faults 112804bb2f94SRik van Riel */ 1129074c2381SMel Gorman unsigned long numa_faults_locality[3]; 113004bb2f94SRik van Riel 1131b32e86b4SIngo Molnar unsigned long numa_pages_migrated; 1132cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */ 1133cbee9f88SPeter Zijlstra 1134d7822b1eSMathieu Desnoyers #ifdef CONFIG_RSEQ 1135d7822b1eSMathieu Desnoyers struct rseq __user *rseq; 1136d7822b1eSMathieu Desnoyers u32 rseq_sig; 1137d7822b1eSMathieu Desnoyers /* 1138d7822b1eSMathieu Desnoyers * RmW on rseq_event_mask must be performed atomically 1139d7822b1eSMathieu Desnoyers * with respect to preemption. 1140d7822b1eSMathieu Desnoyers */ 1141d7822b1eSMathieu Desnoyers unsigned long rseq_event_mask; 1142d7822b1eSMathieu Desnoyers #endif 1143d7822b1eSMathieu Desnoyers 114472b252aeSMel Gorman struct tlbflush_unmap_batch tlb_ubc; 114572b252aeSMel Gorman 11463fbd7ee2SEric W. Biederman union { 11473fbd7ee2SEric W. Biederman refcount_t rcu_users; 1148e56d0903SIngo Molnar struct rcu_head rcu; 11493fbd7ee2SEric W. 
Biederman }; 1150b92ce558SJens Axboe 11515eca1c10SIngo Molnar /* Cache last used pipe for splice(): */ 1152b92ce558SJens Axboe struct pipe_inode_info *splice_pipe; 11535640f768SEric Dumazet 11545640f768SEric Dumazet struct page_frag task_frag; 11555640f768SEric Dumazet 1156ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT 1157ca74e92bSShailabh Nagar struct task_delay_info *delays; 1158ca74e92bSShailabh Nagar #endif 115947913d4eSIngo Molnar 1160f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION 1161f4f154fdSAkinobu Mita int make_it_fail; 11629049f2f6SAkinobu Mita unsigned int fail_nth; 1163f4f154fdSAkinobu Mita #endif 11649d823e8fSWu Fengguang /* 11655eca1c10SIngo Molnar * When (nr_dirtied >= nr_dirtied_pause), it's time to call 11665eca1c10SIngo Molnar * balance_dirty_pages() for a dirty throttling pause: 11679d823e8fSWu Fengguang */ 11689d823e8fSWu Fengguang int nr_dirtied; 11699d823e8fSWu Fengguang int nr_dirtied_pause; 11705eca1c10SIngo Molnar /* Start of a write-and-pause period: */ 11715eca1c10SIngo Molnar unsigned long dirty_paused_when; 11729d823e8fSWu Fengguang 11739745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP 11749745512cSArjan van de Ven int latency_record_count; 11759745512cSArjan van de Ven struct latency_record latency_record[LT_SAVECOUNT]; 11769745512cSArjan van de Ven #endif 11776976675dSArjan van de Ven /* 11785eca1c10SIngo Molnar * Time slack values; these are used to round up poll() and 11796976675dSArjan van de Ven * select() etc timeout values. These are in nanoseconds. 11806976675dSArjan van de Ven */ 1181da8b44d5SJohn Stultz u64 timer_slack_ns; 1182da8b44d5SJohn Stultz u64 default_timer_slack_ns; 1183f8d570a4SDavid Miller 11840b24beccSAndrey Ryabinin #ifdef CONFIG_KASAN 11850b24beccSAndrey Ryabinin unsigned int kasan_depth; 11860b24beccSAndrey Ryabinin #endif 1187dfd402a4SMarco Elver #ifdef CONFIG_KCSAN 1188dfd402a4SMarco Elver struct kcsan_ctx kcsan_ctx; 1189dfd402a4SMarco Elver #endif 11905eca1c10SIngo Molnar 1191fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER 11925eca1c10SIngo Molnar /* Index of current stored address in ret_stack: */ 1193f201ae23SFrederic Weisbecker int curr_ret_stack; 119439eb456dSSteven Rostedt (VMware) int curr_ret_depth; 11955eca1c10SIngo Molnar 11965eca1c10SIngo Molnar /* Stack of return addresses for return function tracing: */ 1197f201ae23SFrederic Weisbecker struct ftrace_ret_stack *ret_stack; 11985eca1c10SIngo Molnar 11995eca1c10SIngo Molnar /* Timestamp for last schedule: */ 12008aef2d28SSteven Rostedt unsigned long long ftrace_timestamp; 12015eca1c10SIngo Molnar 1202f201ae23SFrederic Weisbecker /* 1203f201ae23SFrederic Weisbecker * Number of functions that haven't been traced 12045eca1c10SIngo Molnar * because of depth overrun: 1205f201ae23SFrederic Weisbecker */ 1206f201ae23SFrederic Weisbecker atomic_t trace_overrun; 12075eca1c10SIngo Molnar 12085eca1c10SIngo Molnar /* Pause tracing: */ 1209380c4b14SFrederic Weisbecker atomic_t tracing_graph_pause; 1210f201ae23SFrederic Weisbecker #endif 12115eca1c10SIngo Molnar 1212ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING 12135eca1c10SIngo Molnar /* State flags for use by tracers: */ 1214ea4e2bc4SSteven Rostedt unsigned long trace; 12155eca1c10SIngo Molnar 12165eca1c10SIngo Molnar /* Bitmask and counter of trace recursion: */ 1217261842b7SSteven Rostedt unsigned long trace_recursion; 1218261842b7SSteven Rostedt #endif /* CONFIG_TRACING */ 12195eca1c10SIngo Molnar 12205c9a8750SDmitry Vyukov #ifdef CONFIG_KCOV 1221eec028c9SAndrey Konovalov /* See kernel/kcov.c for more 
details. */ 1222eec028c9SAndrey Konovalov 12235eca1c10SIngo Molnar /* Coverage collection mode enabled for this task (0 if disabled): */ 12240ed557aaSMark Rutland unsigned int kcov_mode; 12255eca1c10SIngo Molnar 12265eca1c10SIngo Molnar /* Size of the kcov_area: */ 12275eca1c10SIngo Molnar unsigned int kcov_size; 12285eca1c10SIngo Molnar 12295eca1c10SIngo Molnar /* Buffer for coverage collection: */ 12305c9a8750SDmitry Vyukov void *kcov_area; 12315eca1c10SIngo Molnar 12325eca1c10SIngo Molnar /* KCOV descriptor wired with this task or NULL: */ 12335c9a8750SDmitry Vyukov struct kcov *kcov; 1234eec028c9SAndrey Konovalov 1235eec028c9SAndrey Konovalov /* KCOV common handle for remote coverage collection: */ 1236eec028c9SAndrey Konovalov u64 kcov_handle; 1237eec028c9SAndrey Konovalov 1238eec028c9SAndrey Konovalov /* KCOV sequence number: */ 1239eec028c9SAndrey Konovalov int kcov_sequence; 12405ff3b30aSAndrey Konovalov 12415ff3b30aSAndrey Konovalov /* Collect coverage from softirq context: */ 12425ff3b30aSAndrey Konovalov unsigned int kcov_softirq; 12435c9a8750SDmitry Vyukov #endif 12445eca1c10SIngo Molnar 12456f185c29SVladimir Davydov #ifdef CONFIG_MEMCG 1246626ebc41STejun Heo struct mem_cgroup *memcg_in_oom; 1247626ebc41STejun Heo gfp_t memcg_oom_gfp_mask; 1248626ebc41STejun Heo int memcg_oom_order; 1249b23afb93STejun Heo 12505eca1c10SIngo Molnar /* Number of pages to reclaim on returning to userland: */ 1251b23afb93STejun Heo unsigned int memcg_nr_pages_over_high; 1252d46eb14bSShakeel Butt 1253d46eb14bSShakeel Butt /* Used by memcontrol for targeted memcg charge: */ 1254d46eb14bSShakeel Butt struct mem_cgroup *active_memcg; 1255569b846dSKAMEZAWA Hiroyuki #endif 12565eca1c10SIngo Molnar 1257d09d8df3SJosef Bacik #ifdef CONFIG_BLK_CGROUP 1258d09d8df3SJosef Bacik struct request_queue *throttle_queue; 1259d09d8df3SJosef Bacik #endif 1260d09d8df3SJosef Bacik 12610326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES 12620326f5a9SSrikar Dronamraju struct uprobe_task *utask; 12630326f5a9SSrikar Dronamraju #endif 1264cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) 1265cafe5635SKent Overstreet unsigned int sequential_io; 1266cafe5635SKent Overstreet unsigned int sequential_io_avg; 1267cafe5635SKent Overstreet #endif 12688eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 12698eb23b9fSPeter Zijlstra unsigned long task_state_change; 12708eb23b9fSPeter Zijlstra #endif 12718bcbde54SDavid Hildenbrand int pagefault_disabled; 127203049269SMichal Hocko #ifdef CONFIG_MMU 127329c696e1SVladimir Davydov struct task_struct *oom_reaper_list; 127403049269SMichal Hocko #endif 1275ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 1276ba14a194SAndy Lutomirski struct vm_struct *stack_vm_area; 1277ba14a194SAndy Lutomirski #endif 127868f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 12795eca1c10SIngo Molnar /* A live task holds one reference: */ 1280f0b89d39SElena Reshetova refcount_t stack_refcount; 128168f24b08SAndy Lutomirski #endif 1282d83a7cb3SJosh Poimboeuf #ifdef CONFIG_LIVEPATCH 1283d83a7cb3SJosh Poimboeuf int patch_state; 1284d83a7cb3SJosh Poimboeuf #endif 1285e4e55b47STetsuo Handa #ifdef CONFIG_SECURITY 1286e4e55b47STetsuo Handa /* Used by LSM modules for access restriction: */ 1287e4e55b47STetsuo Handa void *security; 1288e4e55b47STetsuo Handa #endif 128929e48ce8SKees Cook 1290afaef01cSAlexander Popov #ifdef CONFIG_GCC_PLUGIN_STACKLEAK 1291afaef01cSAlexander Popov unsigned long lowest_stack; 1292c8d12627SAlexander Popov unsigned long prev_lowest_stack; 
1293afaef01cSAlexander Popov #endif 1294afaef01cSAlexander Popov 12955567d11cSPeter Zijlstra #ifdef CONFIG_X86_MCE 12965567d11cSPeter Zijlstra u64 mce_addr; 129717fae129STony Luck __u64 mce_ripv : 1, 129817fae129STony Luck mce_whole_page : 1, 129917fae129STony Luck __mce_reserved : 62; 13005567d11cSPeter Zijlstra struct callback_head mce_kill_me; 13015567d11cSPeter Zijlstra #endif 13025567d11cSPeter Zijlstra 130329e48ce8SKees Cook /* 130429e48ce8SKees Cook * New fields for task_struct should be added above here, so that 130529e48ce8SKees Cook * they are included in the randomized portion of task_struct. 130629e48ce8SKees Cook */ 130729e48ce8SKees Cook randomized_struct_fields_end 130829e48ce8SKees Cook 13095eca1c10SIngo Molnar /* CPU-specific state of this task: */ 13100c8c0f03SDave Hansen struct thread_struct thread; 13115eca1c10SIngo Molnar 13120c8c0f03SDave Hansen /* 13130c8c0f03SDave Hansen * WARNING: on x86, 'thread_struct' contains a variable-sized 13140c8c0f03SDave Hansen * structure. It *MUST* be at the end of 'task_struct'. 13150c8c0f03SDave Hansen * 13160c8c0f03SDave Hansen * Do not put anything below here! 13170c8c0f03SDave Hansen */ 13181da177e4SLinus Torvalds }; 13191da177e4SLinus Torvalds 1320e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task) 132122c935f4SEric W. Biederman { 13222c470475SEric W. Biederman return task->thread_pid; 132322c935f4SEric W. Biederman } 132422c935f4SEric W. Biederman 13257af57294SPavel Emelyanov /* 13267af57294SPavel Emelyanov * the helpers to get the task's different pids as they are seen 13277af57294SPavel Emelyanov * from various namespaces 13287af57294SPavel Emelyanov * 13297af57294SPavel Emelyanov * task_xid_nr() : global id, i.e. the id seen from the init namespace; 133044c4e1b2SEric W. Biederman * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of 133144c4e1b2SEric W. Biederman * current. 13327af57294SPavel Emelyanov * task_xid_nr_ns() : id seen from the ns specified; 13337af57294SPavel Emelyanov * 13347af57294SPavel Emelyanov * see also pid_nr() etc in include/linux/pid.h 13357af57294SPavel Emelyanov */ 13365eca1c10SIngo Molnar pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); 13377af57294SPavel Emelyanov 1338e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk) 13397af57294SPavel Emelyanov { 13407af57294SPavel Emelyanov return tsk->pid; 13417af57294SPavel Emelyanov } 13427af57294SPavel Emelyanov 13435eca1c10SIngo Molnar static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 134452ee2dfdSOleg Nesterov { 134552ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); 134652ee2dfdSOleg Nesterov } 13477af57294SPavel Emelyanov 13487af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk) 13497af57294SPavel Emelyanov { 135052ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); 13517af57294SPavel Emelyanov } 13527af57294SPavel Emelyanov 13537af57294SPavel Emelyanov 1354e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk) 13557af57294SPavel Emelyanov { 13567af57294SPavel Emelyanov return tsk->tgid; 13577af57294SPavel Emelyanov } 13587af57294SPavel Emelyanov 13595eca1c10SIngo Molnar /** 13605eca1c10SIngo Molnar * pid_alive - check that a task structure is not stale 13615eca1c10SIngo Molnar * @p: Task structure to be checked. 
13625eca1c10SIngo Molnar * 13635eca1c10SIngo Molnar * Test if a process is not yet dead (at most zombie state). 13645eca1c10SIngo Molnar * If pid_alive fails, then pointers within the task structure 13655eca1c10SIngo Molnar * can be stale and must not be dereferenced. 13665eca1c10SIngo Molnar * 13675eca1c10SIngo Molnar * Return: 1 if the process is alive. 0 otherwise. 13685eca1c10SIngo Molnar */ 13695eca1c10SIngo Molnar static inline int pid_alive(const struct task_struct *p) 13705eca1c10SIngo Molnar { 13712c470475SEric W. Biederman return p->thread_pid != NULL; 13725eca1c10SIngo Molnar } 13737af57294SPavel Emelyanov 13745eca1c10SIngo Molnar static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 13757af57294SPavel Emelyanov { 137652ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); 13777af57294SPavel Emelyanov } 13787af57294SPavel Emelyanov 13797af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 13807af57294SPavel Emelyanov { 138152ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); 13827af57294SPavel Emelyanov } 13837af57294SPavel Emelyanov 13847af57294SPavel Emelyanov 13855eca1c10SIngo Molnar static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 13867af57294SPavel Emelyanov { 138752ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); 13887af57294SPavel Emelyanov } 13897af57294SPavel Emelyanov 13907af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk) 13917af57294SPavel Emelyanov { 139252ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); 13937af57294SPavel Emelyanov } 13947af57294SPavel Emelyanov 1395dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1396dd1c1f2fSOleg Nesterov { 13976883f81aSEric W. Biederman return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); 1398dd1c1f2fSOleg Nesterov } 1399dd1c1f2fSOleg Nesterov 1400dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_vnr(struct task_struct *tsk) 1401dd1c1f2fSOleg Nesterov { 14026883f81aSEric W.
Biederman return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); 1403dd1c1f2fSOleg Nesterov } 1404dd1c1f2fSOleg Nesterov 1405dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) 1406dd1c1f2fSOleg Nesterov { 1407dd1c1f2fSOleg Nesterov pid_t pid = 0; 1408dd1c1f2fSOleg Nesterov 1409dd1c1f2fSOleg Nesterov rcu_read_lock(); 1410dd1c1f2fSOleg Nesterov if (pid_alive(tsk)) 1411dd1c1f2fSOleg Nesterov pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); 1412dd1c1f2fSOleg Nesterov rcu_read_unlock(); 1413dd1c1f2fSOleg Nesterov 1414dd1c1f2fSOleg Nesterov return pid; 1415dd1c1f2fSOleg Nesterov } 1416dd1c1f2fSOleg Nesterov 1417dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr(const struct task_struct *tsk) 1418dd1c1f2fSOleg Nesterov { 1419dd1c1f2fSOleg Nesterov return task_ppid_nr_ns(tsk, &init_pid_ns); 1420dd1c1f2fSOleg Nesterov } 1421dd1c1f2fSOleg Nesterov 14225eca1c10SIngo Molnar /* Obsolete, do not use: */ 14231b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk) 14241b0f7ffdSOleg Nesterov { 14251b0f7ffdSOleg Nesterov return task_pgrp_nr_ns(tsk, &init_pid_ns); 14261b0f7ffdSOleg Nesterov } 14277af57294SPavel Emelyanov 142806eb6184SPeter Zijlstra #define TASK_REPORT_IDLE (TASK_REPORT + 1) 142906eb6184SPeter Zijlstra #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) 143006eb6184SPeter Zijlstra 14311d48b080SPeter Zijlstra static inline unsigned int task_state_index(struct task_struct *tsk) 143220435d84SXie XiuQi { 14331593baabSPeter Zijlstra unsigned int tsk_state = READ_ONCE(tsk->state); 14341593baabSPeter Zijlstra unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; 143520435d84SXie XiuQi 143606eb6184SPeter Zijlstra BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); 143706eb6184SPeter Zijlstra 143806eb6184SPeter Zijlstra if (tsk_state == TASK_IDLE) 143906eb6184SPeter Zijlstra state = TASK_REPORT_IDLE; 144006eb6184SPeter Zijlstra 14411593baabSPeter Zijlstra return fls(state); 14421593baabSPeter Zijlstra } 144320435d84SXie XiuQi 14441d48b080SPeter Zijlstra static inline char task_index_to_char(unsigned int state) 14451593baabSPeter Zijlstra { 14468ef9925bSPeter Zijlstra static const char state_char[] = "RSDTtXZPI"; 14471593baabSPeter Zijlstra 144806eb6184SPeter Zijlstra BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); 14491593baabSPeter Zijlstra 14501593baabSPeter Zijlstra return state_char[state]; 14511593baabSPeter Zijlstra } 14521593baabSPeter Zijlstra 14531593baabSPeter Zijlstra static inline char task_state_to_char(struct task_struct *tsk) 14541593baabSPeter Zijlstra { 14551d48b080SPeter Zijlstra return task_index_to_char(task_state_index(tsk)); 145620435d84SXie XiuQi } 145720435d84SXie XiuQi 14581da177e4SLinus Torvalds /** 1459570f5241SSergey Senozhatsky * is_global_init - check if a task structure is init. Since init 1460570f5241SSergey Senozhatsky * is free to have sub-threads we need to check tgid. 14613260259fSHenne * @tsk: Task structure to be checked. 14623260259fSHenne * 14633260259fSHenne * Check if a task structure is the first user space task the kernel created. 1464e69f6186SYacine Belkadi * 1465e69f6186SYacine Belkadi * Return: 1 if the task structure is init. 0 otherwise. 1466f400e198SSukadev Bhattiprolu */ 1467e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk) 1468b461cc03SPavel Emelyanov { 1469570f5241SSergey Senozhatsky return task_tgid_nr(tsk) == 1; 1470b461cc03SPavel Emelyanov } 1471b460cbc5SSerge E. 
Hallyn 14729ec52099SCedric Le Goater extern struct pid *cad_pid; 14739ec52099SCedric Le Goater 14741da177e4SLinus Torvalds /* 14751da177e4SLinus Torvalds * Per process flags 14761da177e4SLinus Torvalds */ 1477c1de45caSPeter Zijlstra #define PF_IDLE 0x00000002 /* I am an IDLE thread */ 14785eca1c10SIngo Molnar #define PF_EXITING 0x00000004 /* Getting shut down */ 147994886b84SLaurent Vivier #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ 148021aa9af0STejun Heo #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 14815eca1c10SIngo Molnar #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ 14825eca1c10SIngo Molnar #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ 14835eca1c10SIngo Molnar #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ 14845eca1c10SIngo Molnar #define PF_DUMPCORE 0x00000200 /* Dumped core */ 14855eca1c10SIngo Molnar #define PF_SIGNALED 0x00000400 /* Killed by a signal */ 14861da177e4SLinus Torvalds #define PF_MEMALLOC 0x00000800 /* Allocating memory */ 14875eca1c10SIngo Molnar #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ 14885eca1c10SIngo Molnar #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ 14895eca1c10SIngo Molnar #define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ 14905eca1c10SIngo Molnar #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ 14915eca1c10SIngo Molnar #define PF_FROZEN 0x00010000 /* Frozen for system suspend */ 14927dea19f9SMichal Hocko #define PF_KSWAPD 0x00020000 /* I am kswapd */ 14937dea19f9SMichal Hocko #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ 14947dea19f9SMichal Hocko #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ 1495a37b0715SNeilBrown #define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, 1496a37b0715SNeilBrown * I am cleaning dirty pages from some other bdi. */ 1497246bb0b1SOleg Nesterov #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 14985eca1c10SIngo Molnar #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1499b31dc66aSJens Axboe #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 150073ab1cb2STaehee Yoo #define PF_UMH 0x02000000 /* I'm a usermodehelper process */ 15013bd37062SSebastian Andrzej Siewior #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ 15024db96cf0SAndi Kleen #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1503d7fefcc8SAneesh Kumar K.V #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation requests will have __GFP_MOVABLE cleared */ 1504771b53d0SJens Axboe #define PF_IO_WORKER 0x20000000 /* Task is an IO worker */ 150558a69cb4STejun Heo #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ 15065eca1c10SIngo Molnar #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ 15071da177e4SLinus Torvalds 15081da177e4SLinus Torvalds /* 15091da177e4SLinus Torvalds * Only the _current_ task can read/write to tsk->flags, but other 15101da177e4SLinus Torvalds * tasks can access tsk->flags in readonly mode, for example 15111da177e4SLinus Torvalds * with tsk_used_math (like during threaded core dumping).
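 *
 * A minimal sketch of that rule (illustrative only, not a complete
 * kernel sequence): the owning task may update its own flags, e.g.
 *
 *	unsigned int pflags = current->flags;
 *	current->flags |= PF_MEMALLOC;
 *	... allocation that must not recurse into reclaim ...
 *	current_restore_flags(pflags, PF_MEMALLOC);
 *
 * (current_restore_flags() is defined later in this header), while any
 * other task may only read them, e.g.
 *
 *	if (p->flags & PF_KSWAPD)
 *		... p is a kswapd thread ...
 *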
15121da177e4SLinus Torvalds * There is however an exception to this rule during ptrace 15131da177e4SLinus Torvalds * or during fork: the ptracer task is allowed to write to the 15141da177e4SLinus Torvalds * child->flags of its traced child (same goes for fork, the parent 15151da177e4SLinus Torvalds * can write to the child->flags), because we're guaranteed the 15161da177e4SLinus Torvalds * child is not running and in turn not changing child->flags 15171da177e4SLinus Torvalds * at the same time the parent does it. 15181da177e4SLinus Torvalds */ 15191da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 15201da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 15211da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current) 15221da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current) 15235eca1c10SIngo Molnar 15241da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \ 15251da177e4SLinus Torvalds do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 15265eca1c10SIngo Molnar 15275eca1c10SIngo Molnar #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) 15285eca1c10SIngo Molnar 15291da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \ 15301da177e4SLinus Torvalds do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 15315eca1c10SIngo Molnar 15321da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 15331da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 15341da177e4SLinus Torvalds #define used_math() tsk_used_math(current) 15351da177e4SLinus Torvalds 153662ec05ddSThomas Gleixner static inline bool is_percpu_thread(void) 153762ec05ddSThomas Gleixner { 153862ec05ddSThomas Gleixner #ifdef CONFIG_SMP 153962ec05ddSThomas Gleixner return (current->flags & PF_NO_SETAFFINITY) && 154062ec05ddSThomas Gleixner (current->nr_cpus_allowed == 1); 154162ec05ddSThomas Gleixner #else 154262ec05ddSThomas Gleixner return true; 154362ec05ddSThomas Gleixner #endif 154462ec05ddSThomas Gleixner } 154562ec05ddSThomas Gleixner 15461d4457f9SKees Cook /* Per-process atomic flags. */ 1547a2b86f77SZefan Li #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
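 * Set via prctl(PR_SET_NO_NEW_PRIVS); it is inherited across fork() and
 * preserved across execve(), and by design can never be cleared again,
 * which is why no TASK_PFA_CLEAR() helper is instantiated for it below.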
*/ 15482ad654bcSZefan Li #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 15492ad654bcSZefan Li #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1550356e4bffSThomas Gleixner #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ 1551356e4bffSThomas Gleixner #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ 15529137bb27SThomas Gleixner #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ 15539137bb27SThomas Gleixner #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ 155471368af9SWaiman Long #define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ 15551d4457f9SKees Cook 1556e0e5070bSZefan Li #define TASK_PFA_TEST(name, func) \ 1557e0e5070bSZefan Li static inline bool task_##func(struct task_struct *p) \ 1558e0e5070bSZefan Li { return test_bit(PFA_##name, &p->atomic_flags); } 15595eca1c10SIngo Molnar 1560e0e5070bSZefan Li #define TASK_PFA_SET(name, func) \ 1561e0e5070bSZefan Li static inline void task_set_##func(struct task_struct *p) \ 1562e0e5070bSZefan Li { set_bit(PFA_##name, &p->atomic_flags); } 15635eca1c10SIngo Molnar 1564e0e5070bSZefan Li #define TASK_PFA_CLEAR(name, func) \ 1565e0e5070bSZefan Li static inline void task_clear_##func(struct task_struct *p) \ 1566e0e5070bSZefan Li { clear_bit(PFA_##name, &p->atomic_flags); } 15671d4457f9SKees Cook 1568e0e5070bSZefan Li TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) 1569e0e5070bSZefan Li TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) 15701d4457f9SKees Cook 15712ad654bcSZefan Li TASK_PFA_TEST(SPREAD_PAGE, spread_page) 15722ad654bcSZefan Li TASK_PFA_SET(SPREAD_PAGE, spread_page) 15732ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) 15742ad654bcSZefan Li 15752ad654bcSZefan Li TASK_PFA_TEST(SPREAD_SLAB, spread_slab) 15762ad654bcSZefan Li TASK_PFA_SET(SPREAD_SLAB, spread_slab) 15772ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 1578544b2c91STejun Heo 1579356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) 1580356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) 1581356e4bffSThomas Gleixner TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) 1582356e4bffSThomas Gleixner 158371368af9SWaiman Long TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) 158471368af9SWaiman Long TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) 158571368af9SWaiman Long TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) 158671368af9SWaiman Long 1587356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1588356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1589356e4bffSThomas Gleixner 15909137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) 15919137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) 15929137bb27SThomas Gleixner TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) 15939137bb27SThomas Gleixner 15949137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 15959137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 15969137bb27SThomas Gleixner 15975eca1c10SIngo Molnar static inline void 1598717a94b5SNeilBrown current_restore_flags(unsigned long orig_flags, unsigned long flags) 1599907aed48SMel Gorman { 1600717a94b5SNeilBrown current->flags &= ~flags; 1601717a94b5SNeilBrown current->flags |= orig_flags & flags; 1602907aed48SMel Gorman } 1603907aed48SMel Gorman 16045eca1c10SIngo Molnar extern int 
cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 16055eca1c10SIngo Molnar extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); 16061da177e4SLinus Torvalds #ifdef CONFIG_SMP 16075eca1c10SIngo Molnar extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); 16085eca1c10SIngo Molnar extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); 16091da177e4SLinus Torvalds #else 16105eca1c10SIngo Molnar static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 16111e1b6c51SKOSAKI Motohiro { 16121e1b6c51SKOSAKI Motohiro } 16135eca1c10SIngo Molnar static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 16141da177e4SLinus Torvalds { 161596f874e2SRusty Russell if (!cpumask_test_cpu(0, new_mask)) 16161da177e4SLinus Torvalds return -EINVAL; 16171da177e4SLinus Torvalds return 0; 16181da177e4SLinus Torvalds } 16191da177e4SLinus Torvalds #endif 1620e0ad9556SRusty Russell 1621fa93384fSDan Carpenter extern int yield_to(struct task_struct *p, bool preempt); 162236c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice); 162336c8b586SIngo Molnar extern int task_prio(const struct task_struct *p); 16245eca1c10SIngo Molnar 1625d0ea0268SDongsheng Yang /** 1626d0ea0268SDongsheng Yang * task_nice - return the nice value of a given task. 1627d0ea0268SDongsheng Yang * @p: the task in question. 1628d0ea0268SDongsheng Yang * 1629d0ea0268SDongsheng Yang * Return: The nice value [ -20 ... 0 ... 19 ]. 1630d0ea0268SDongsheng Yang */ 1631d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p) 1632d0ea0268SDongsheng Yang { 1633d0ea0268SDongsheng Yang return PRIO_TO_NICE((p)->static_prio); 1634d0ea0268SDongsheng Yang } 16355eca1c10SIngo Molnar 163636c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice); 163736c8b586SIngo Molnar extern int task_curr(const struct task_struct *p); 16381da177e4SLinus Torvalds extern int idle_cpu(int cpu); 1639943d355dSRohit Jain extern int available_idle_cpu(int cpu); 16405eca1c10SIngo Molnar extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); 16415eca1c10SIngo Molnar extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); 16425eca1c10SIngo Molnar extern int sched_setattr(struct task_struct *, const struct sched_attr *); 1643794a56ebSJuri Lelli extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); 164436c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu); 16455eca1c10SIngo Molnar 1646c4f30608SPaul E. McKenney /** 1647c4f30608SPaul E. McKenney * is_idle_task - is the specified task an idle task? 1648fa757281SRandy Dunlap * @p: the task in question. 1649e69f6186SYacine Belkadi * 1650e69f6186SYacine Belkadi * Return: 1 if @p is an idle task. 0 otherwise. 1651c4f30608SPaul E. McKenney */ 16527061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p) 1653c4f30608SPaul E. McKenney { 1654c1de45caSPeter Zijlstra return !!(p->flags & PF_IDLE); 1655c4f30608SPaul E. 
McKenney } 16565eca1c10SIngo Molnar 165736c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu); 1658a458ae2eSPeter Zijlstra extern void ia64_set_curr_task(int cpu, struct task_struct *p); 16591da177e4SLinus Torvalds 16601da177e4SLinus Torvalds void yield(void); 16611da177e4SLinus Torvalds 16621da177e4SLinus Torvalds union thread_union { 16630500871fSDavid Howells #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK 16640500871fSDavid Howells struct task_struct task; 16650500871fSDavid Howells #endif 1666c65eacbeSAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK 16671da177e4SLinus Torvalds struct thread_info thread_info; 1668c65eacbeSAndy Lutomirski #endif 16691da177e4SLinus Torvalds unsigned long stack[THREAD_SIZE/sizeof(long)]; 16701da177e4SLinus Torvalds }; 16711da177e4SLinus Torvalds 16720500871fSDavid Howells #ifndef CONFIG_THREAD_INFO_IN_TASK 16730500871fSDavid Howells extern struct thread_info init_thread_info; 16740500871fSDavid Howells #endif 16750500871fSDavid Howells 16760500871fSDavid Howells extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; 16770500871fSDavid Howells 1678f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK 1679f3ac6067SIngo Molnar static inline struct thread_info *task_thread_info(struct task_struct *task) 1680f3ac6067SIngo Molnar { 1681f3ac6067SIngo Molnar return &task->thread_info; 1682f3ac6067SIngo Molnar } 1683f3ac6067SIngo Molnar #elif !defined(__HAVE_THREAD_FUNCTIONS) 1684f3ac6067SIngo Molnar # define task_thread_info(task) ((struct thread_info *)(task)->stack) 1685f3ac6067SIngo Molnar #endif 1686f3ac6067SIngo Molnar 1687198fe21bSPavel Emelyanov /* 1688198fe21bSPavel Emelyanov * find a task by one of its numerical ids 1689198fe21bSPavel Emelyanov * 1690198fe21bSPavel Emelyanov * find_task_by_pid_ns(): 1691198fe21bSPavel Emelyanov * finds a task by its pid in the specified namespace 1692228ebcbeSPavel Emelyanov * find_task_by_vpid(): 1693228ebcbeSPavel Emelyanov * finds a task by its virtual pid 1694198fe21bSPavel Emelyanov * 1695e49859e7SPavel Emelyanov * see also find_vpid() etc in include/linux/pid.h 1696198fe21bSPavel Emelyanov */ 1697198fe21bSPavel Emelyanov 1698228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr); 16995eca1c10SIngo Molnar extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); 1700198fe21bSPavel Emelyanov 17012ee08260SMike Rapoport /* 17022ee08260SMike Rapoport * find a task by its virtual pid and get the task struct 17032ee08260SMike Rapoport */ 17042ee08260SMike Rapoport extern struct task_struct *find_get_task_by_vpid(pid_t nr); 17052ee08260SMike Rapoport 1706b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state); 1707b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk); 17083e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk); 17095eca1c10SIngo Molnar 17101da177e4SLinus Torvalds #ifdef CONFIG_SMP 17111da177e4SLinus Torvalds extern void kick_process(struct task_struct *tsk); 17121da177e4SLinus Torvalds #else 17131da177e4SLinus Torvalds static inline void kick_process(struct task_struct *tsk) { } 17141da177e4SLinus Torvalds #endif 17151da177e4SLinus Torvalds 171682b89778SAdrian Hunter extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); 17175eca1c10SIngo Molnar 171882b89778SAdrian Hunter static inline void set_task_comm(struct task_struct *tsk, const char *from) 171982b89778SAdrian Hunter { 172082b89778SAdrian Hunter __set_task_comm(tsk, 
from, false); 172182b89778SAdrian Hunter } 17225eca1c10SIngo Molnar 17233756f640SArnd Bergmann extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); 17243756f640SArnd Bergmann #define get_task_comm(buf, tsk) ({ \ 17253756f640SArnd Bergmann BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ 17263756f640SArnd Bergmann __get_task_comm(buf, sizeof(buf), tsk); \ 17273756f640SArnd Bergmann }) 17281da177e4SLinus Torvalds 17291da177e4SLinus Torvalds #ifdef CONFIG_SMP 17302a0a24ebSThomas Gleixner static __always_inline void scheduler_ipi(void) 17312a0a24ebSThomas Gleixner { 17322a0a24ebSThomas Gleixner /* 17332a0a24ebSThomas Gleixner * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 17342a0a24ebSThomas Gleixner * TIF_NEED_RESCHED remotely (for the first time) will also send 17352a0a24ebSThomas Gleixner * this IPI. 17362a0a24ebSThomas Gleixner */ 17372a0a24ebSThomas Gleixner preempt_fold_need_resched(); 17382a0a24ebSThomas Gleixner } 173985ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state); 17401da177e4SLinus Torvalds #else 1741184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { } 17425eca1c10SIngo Molnar static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) 174385ba2d86SRoland McGrath { 174485ba2d86SRoland McGrath return 1; 174585ba2d86SRoland McGrath } 17461da177e4SLinus Torvalds #endif 17471da177e4SLinus Torvalds 17485eca1c10SIngo Molnar /* 17495eca1c10SIngo Molnar * Set thread flags in other task's structures. 17505eca1c10SIngo Molnar * See asm/thread_info.h for TIF_xxxx flags available: 17511da177e4SLinus Torvalds */ 17521da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 17531da177e4SLinus Torvalds { 1754a1261f54SAl Viro set_ti_thread_flag(task_thread_info(tsk), flag); 17551da177e4SLinus Torvalds } 17561da177e4SLinus Torvalds 17571da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 17581da177e4SLinus Torvalds { 1759a1261f54SAl Viro clear_ti_thread_flag(task_thread_info(tsk), flag); 17601da177e4SLinus Torvalds } 17611da177e4SLinus Torvalds 176293ee37c2SDave Martin static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, 176393ee37c2SDave Martin bool value) 176493ee37c2SDave Martin { 176593ee37c2SDave Martin update_ti_thread_flag(task_thread_info(tsk), flag, value); 176693ee37c2SDave Martin } 176793ee37c2SDave Martin 17681da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 17691da177e4SLinus Torvalds { 1770a1261f54SAl Viro return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 17711da177e4SLinus Torvalds } 17721da177e4SLinus Torvalds 17731da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 17741da177e4SLinus Torvalds { 1775a1261f54SAl Viro return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 17761da177e4SLinus Torvalds } 17771da177e4SLinus Torvalds 17781da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 17791da177e4SLinus Torvalds { 1780a1261f54SAl Viro return test_ti_thread_flag(task_thread_info(tsk), flag); 17811da177e4SLinus Torvalds } 17821da177e4SLinus Torvalds 17831da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk) 17841da177e4SLinus Torvalds { 17851da177e4SLinus Torvalds set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 17861da177e4SLinus 
Torvalds } 17871da177e4SLinus Torvalds 17881da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk) 17891da177e4SLinus Torvalds { 17901da177e4SLinus Torvalds clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 17911da177e4SLinus Torvalds } 17921da177e4SLinus Torvalds 17938ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk) 17948ae121acSGregory Haskins { 17958ae121acSGregory Haskins return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); 17968ae121acSGregory Haskins } 17978ae121acSGregory Haskins 17981da177e4SLinus Torvalds /* 17991da177e4SLinus Torvalds * cond_resched() and cond_resched_lock(): latency reduction via 18001da177e4SLinus Torvalds * explicit rescheduling in places that are safe. The return 18011da177e4SLinus Torvalds * value indicates whether a reschedule was actually done. 18021da177e4SLinus Torvalds * cond_resched_lock() will drop the spinlock before scheduling and reacquire it afterwards. 18031da177e4SLinus Torvalds */ 1804c1a280b6SThomas Gleixner #ifndef CONFIG_PREEMPTION 1805c3921ab7SLinus Torvalds extern int _cond_resched(void); 180635a773a0SPeter Zijlstra #else 180635a773a0SPeter Zijlstra static inline int _cond_resched(void) { return 0; } 180835a773a0SPeter Zijlstra #endif 18096f80bd98SFrederic Weisbecker 1810613afbf8SFrederic Weisbecker #define cond_resched() ({ \ 18113427445aSPeter Zijlstra ___might_sleep(__FILE__, __LINE__, 0); \ 1812613afbf8SFrederic Weisbecker _cond_resched(); \ 1813613afbf8SFrederic Weisbecker }) 18146f80bd98SFrederic Weisbecker 1815613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock); 1816613afbf8SFrederic Weisbecker 1817613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({ \ 18183427445aSPeter Zijlstra ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ 1819613afbf8SFrederic Weisbecker __cond_resched_lock(lock); \ 1820613afbf8SFrederic Weisbecker }) 1821613afbf8SFrederic Weisbecker 1822f6f3c437SSimon Horman static inline void cond_resched_rcu(void) 1823f6f3c437SSimon Horman { 1824f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) 1825f6f3c437SSimon Horman rcu_read_unlock(); 1826f6f3c437SSimon Horman cond_resched(); 1827f6f3c437SSimon Horman rcu_read_lock(); 1828f6f3c437SSimon Horman #endif 1829f6f3c437SSimon Horman } 1830f6f3c437SSimon Horman 18311da177e4SLinus Torvalds /* 18321da177e4SLinus Torvalds * Does a critical section need to be broken due to another 1833c1a280b6SThomas Gleixner * task waiting? (This technically does not depend on CONFIG_PREEMPTION, 183495c354feSNick Piggin * but reflects a general need for low latency.) 18351da177e4SLinus Torvalds */ 183695c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock) 18371da177e4SLinus Torvalds { 1838c1a280b6SThomas Gleixner #ifdef CONFIG_PREEMPTION 183995c354feSNick Piggin return spin_is_contended(lock); 184095c354feSNick Piggin #else 18411da177e4SLinus Torvalds return 0; 184295c354feSNick Piggin #endif 18431da177e4SLinus Torvalds } 18441da177e4SLinus Torvalds 184575f93fedSPeter Zijlstra static __always_inline bool need_resched(void) 184675f93fedSPeter Zijlstra { 184775f93fedSPeter Zijlstra return unlikely(tif_need_resched()); 184875f93fedSPeter Zijlstra } 184975f93fedSPeter Zijlstra 1850ee761f62SThomas Gleixner /* 18511da177e4SLinus Torvalds * Wrappers for p->thread_info->cpu access. No-op on UP.
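 *
 * A minimal usage sketch (illustrative only): task_cpu() reports the
 * CPU the task is running on, or last ran on, e.g.
 *
 *	int cpu = task_cpu(p);
 *
 * On SMP the implementation below wraps the load in READ_ONCE(), since
 * set_task_cpu() may update the field concurrently from another CPU.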
18521da177e4SLinus Torvalds */ 18531da177e4SLinus Torvalds #ifdef CONFIG_SMP 18541da177e4SLinus Torvalds 18551da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p) 18561da177e4SLinus Torvalds { 1857c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 1858c546951dSAndrea Parri return READ_ONCE(p->cpu); 1859c65eacbeSAndy Lutomirski #else 1860c546951dSAndrea Parri return READ_ONCE(task_thread_info(p)->cpu); 1861c65eacbeSAndy Lutomirski #endif 18621da177e4SLinus Torvalds } 18631da177e4SLinus Torvalds 1864c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 18651da177e4SLinus Torvalds 18661da177e4SLinus Torvalds #else 18671da177e4SLinus Torvalds 18681da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p) 18691da177e4SLinus Torvalds { 18701da177e4SLinus Torvalds return 0; 18711da177e4SLinus Torvalds } 18721da177e4SLinus Torvalds 18731da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 18741da177e4SLinus Torvalds { 18751da177e4SLinus Torvalds } 18761da177e4SLinus Torvalds 18771da177e4SLinus Torvalds #endif /* CONFIG_SMP */ 18781da177e4SLinus Torvalds 1879d9345c65SPan Xinhui /* 1880d9345c65SPan Xinhui * In order to reduce various lock holder preemption latencies, provide an 1881d9345c65SPan Xinhui * interface to see if a vCPU is currently running or not. 1882d9345c65SPan Xinhui * 1883d9345c65SPan Xinhui * This allows us to terminate optimistic spin loops and block, analogous to 1884d9345c65SPan Xinhui * the native optimistic spin heuristic of testing if the lock owner task is 1885d9345c65SPan Xinhui * running or not. 1886d9345c65SPan Xinhui */ 1887d9345c65SPan Xinhui #ifndef vcpu_is_preempted 188842fd8baaSQian Cai static inline bool vcpu_is_preempted(int cpu) 188942fd8baaSQian Cai { 189042fd8baaSQian Cai return false; 189142fd8baaSQian Cai } 1892d9345c65SPan Xinhui #endif 1893d9345c65SPan Xinhui 189496f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 189596f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 18965c45bf27SSiddha, Suresh B 189782455257SDave Hansen #ifndef TASK_SIZE_OF 189882455257SDave Hansen #define TASK_SIZE_OF(tsk) TASK_SIZE 189982455257SDave Hansen #endif 190082455257SDave Hansen 1901d7822b1eSMathieu Desnoyers #ifdef CONFIG_RSEQ 1902d7822b1eSMathieu Desnoyers 1903d7822b1eSMathieu Desnoyers /* 1904d7822b1eSMathieu Desnoyers * Map the event mask onto the user-space ABI enum rseq_cs_flags 1905d7822b1eSMathieu Desnoyers * for direct mask checks.
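 *
 * For instance, since RSEQ_EVENT_PREEMPT_BIT below is defined as
 * RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, a recorded preempt event can
 * be matched against a critical section's user-supplied flags with a
 * plain mask operation, roughly (illustrative sketch, not the exact
 * kernel code):
 *
 *	if (t->rseq_event_mask & RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT)
 *		... the preempt event is covered by the no-restart flag ...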
1906d7822b1eSMathieu Desnoyers */ 1907d7822b1eSMathieu Desnoyers enum rseq_event_mask_bits { 1908d7822b1eSMathieu Desnoyers RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, 1909d7822b1eSMathieu Desnoyers RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, 1910d7822b1eSMathieu Desnoyers RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, 1911d7822b1eSMathieu Desnoyers }; 1912d7822b1eSMathieu Desnoyers 1913d7822b1eSMathieu Desnoyers enum rseq_event_mask { 1914d7822b1eSMathieu Desnoyers RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), 1915d7822b1eSMathieu Desnoyers RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), 1916d7822b1eSMathieu Desnoyers RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), 1917d7822b1eSMathieu Desnoyers }; 1918d7822b1eSMathieu Desnoyers 1919d7822b1eSMathieu Desnoyers static inline void rseq_set_notify_resume(struct task_struct *t) 1920d7822b1eSMathieu Desnoyers { 1921d7822b1eSMathieu Desnoyers if (t->rseq) 1922d7822b1eSMathieu Desnoyers set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); 1923d7822b1eSMathieu Desnoyers } 1924d7822b1eSMathieu Desnoyers 1925784e0300SWill Deacon void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); 1926d7822b1eSMathieu Desnoyers 1927784e0300SWill Deacon static inline void rseq_handle_notify_resume(struct ksignal *ksig, 1928784e0300SWill Deacon struct pt_regs *regs) 1929d7822b1eSMathieu Desnoyers { 1930d7822b1eSMathieu Desnoyers if (current->rseq) 1931784e0300SWill Deacon __rseq_handle_notify_resume(ksig, regs); 1932d7822b1eSMathieu Desnoyers } 1933d7822b1eSMathieu Desnoyers 1934784e0300SWill Deacon static inline void rseq_signal_deliver(struct ksignal *ksig, 1935784e0300SWill Deacon struct pt_regs *regs) 1936d7822b1eSMathieu Desnoyers { 1937d7822b1eSMathieu Desnoyers preempt_disable(); 1938d7822b1eSMathieu Desnoyers __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); 1939d7822b1eSMathieu Desnoyers preempt_enable(); 1940784e0300SWill Deacon rseq_handle_notify_resume(ksig, regs); 1941d7822b1eSMathieu Desnoyers } 1942d7822b1eSMathieu Desnoyers 1943d7822b1eSMathieu Desnoyers /* rseq_preempt() requires preemption to be disabled. */ 1944d7822b1eSMathieu Desnoyers static inline void rseq_preempt(struct task_struct *t) 1945d7822b1eSMathieu Desnoyers { 1946d7822b1eSMathieu Desnoyers __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); 1947d7822b1eSMathieu Desnoyers rseq_set_notify_resume(t); 1948d7822b1eSMathieu Desnoyers } 1949d7822b1eSMathieu Desnoyers 1950d7822b1eSMathieu Desnoyers /* rseq_migrate() requires preemption to be disabled. */ 1951d7822b1eSMathieu Desnoyers static inline void rseq_migrate(struct task_struct *t) 1952d7822b1eSMathieu Desnoyers { 1953d7822b1eSMathieu Desnoyers __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); 1954d7822b1eSMathieu Desnoyers rseq_set_notify_resume(t); 1955d7822b1eSMathieu Desnoyers } 1956d7822b1eSMathieu Desnoyers 1957d7822b1eSMathieu Desnoyers /* 1958d7822b1eSMathieu Desnoyers * If the parent process has a registered restartable sequences area, the 1959463f550fSMathieu Desnoyers * child inherits it. Unregister rseq for a clone with CLONE_VM set.
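 *
 * From the user-space point of view (illustrative note, not enforced
 * here): a plain fork() child keeps the parent's registration, while a
 * thread created with CLONE_VM starts with t->rseq == NULL and must
 * issue its own rseq() registration syscall before it can use
 * restartable sequences.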
1960d7822b1eSMathieu Desnoyers */ 1961d7822b1eSMathieu Desnoyers static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) 1962d7822b1eSMathieu Desnoyers { 1963463f550fSMathieu Desnoyers if (clone_flags & CLONE_VM) { 1964d7822b1eSMathieu Desnoyers t->rseq = NULL; 1965d7822b1eSMathieu Desnoyers t->rseq_sig = 0; 1966d7822b1eSMathieu Desnoyers t->rseq_event_mask = 0; 1967d7822b1eSMathieu Desnoyers } else { 1968d7822b1eSMathieu Desnoyers t->rseq = current->rseq; 1969d7822b1eSMathieu Desnoyers t->rseq_sig = current->rseq_sig; 1970d7822b1eSMathieu Desnoyers t->rseq_event_mask = current->rseq_event_mask; 1971d7822b1eSMathieu Desnoyers } 1972d7822b1eSMathieu Desnoyers } 1973d7822b1eSMathieu Desnoyers 1974d7822b1eSMathieu Desnoyers static inline void rseq_execve(struct task_struct *t) 1975d7822b1eSMathieu Desnoyers { 1976d7822b1eSMathieu Desnoyers t->rseq = NULL; 1977d7822b1eSMathieu Desnoyers t->rseq_sig = 0; 1978d7822b1eSMathieu Desnoyers t->rseq_event_mask = 0; 1979d7822b1eSMathieu Desnoyers } 1980d7822b1eSMathieu Desnoyers 1981d7822b1eSMathieu Desnoyers #else 1982d7822b1eSMathieu Desnoyers 1983d7822b1eSMathieu Desnoyers static inline void rseq_set_notify_resume(struct task_struct *t) 1984d7822b1eSMathieu Desnoyers { 1985d7822b1eSMathieu Desnoyers } 1986784e0300SWill Deacon static inline void rseq_handle_notify_resume(struct ksignal *ksig, 1987784e0300SWill Deacon struct pt_regs *regs) 1988d7822b1eSMathieu Desnoyers { 1989d7822b1eSMathieu Desnoyers } 1990784e0300SWill Deacon static inline void rseq_signal_deliver(struct ksignal *ksig, 1991784e0300SWill Deacon struct pt_regs *regs) 1992d7822b1eSMathieu Desnoyers { 1993d7822b1eSMathieu Desnoyers } 1994d7822b1eSMathieu Desnoyers static inline void rseq_preempt(struct task_struct *t) 1995d7822b1eSMathieu Desnoyers { 1996d7822b1eSMathieu Desnoyers } 1997d7822b1eSMathieu Desnoyers static inline void rseq_migrate(struct task_struct *t) 1998d7822b1eSMathieu Desnoyers { 1999d7822b1eSMathieu Desnoyers } 2000d7822b1eSMathieu Desnoyers static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) 2001d7822b1eSMathieu Desnoyers { 2002d7822b1eSMathieu Desnoyers } 2003d7822b1eSMathieu Desnoyers static inline void rseq_execve(struct task_struct *t) 2004d7822b1eSMathieu Desnoyers { 2005d7822b1eSMathieu Desnoyers } 2006d7822b1eSMathieu Desnoyers 2007d7822b1eSMathieu Desnoyers #endif 2008d7822b1eSMathieu Desnoyers 200973ab1cb2STaehee Yoo void __exit_umh(struct task_struct *tsk); 201073ab1cb2STaehee Yoo 201173ab1cb2STaehee Yoo static inline void exit_umh(struct task_struct *tsk) 201273ab1cb2STaehee Yoo { 201373ab1cb2STaehee Yoo if (unlikely(tsk->flags & PF_UMH)) 201473ab1cb2STaehee Yoo __exit_umh(tsk); 201573ab1cb2STaehee Yoo } 201673ab1cb2STaehee Yoo 2017d7822b1eSMathieu Desnoyers #ifdef CONFIG_DEBUG_RSEQ 2018d7822b1eSMathieu Desnoyers 2019d7822b1eSMathieu Desnoyers void rseq_syscall(struct pt_regs *regs); 2020d7822b1eSMathieu Desnoyers 2021d7822b1eSMathieu Desnoyers #else 2022d7822b1eSMathieu Desnoyers 2023d7822b1eSMathieu Desnoyers static inline void rseq_syscall(struct pt_regs *regs) 2024d7822b1eSMathieu Desnoyers { 2025d7822b1eSMathieu Desnoyers } 2026d7822b1eSMathieu Desnoyers 2027d7822b1eSMathieu Desnoyers #endif 2028d7822b1eSMathieu Desnoyers 20293c93a0c0SQais Yousef const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq); 20303c93a0c0SQais Yousef char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len); 20313c93a0c0SQais Yousef int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq); 
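/*
 * A minimal usage sketch for the cfs_rq helpers above (illustrative
 * only; the probe function and its registration against the scheduler
 * PELT tracepoints are assumptions, not part of this header):
 *
 *	void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);
 *		char path[64];
 *
 *		if (!avg || !sched_trace_cfs_rq_path(cfs_rq, path, sizeof(path)))
 *			return;
 *		pr_info("cpu%d %s load_avg=%lu\n",
 *			sched_trace_cfs_rq_cpu(cfs_rq), path, avg->load_avg);
 *	}
 */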
20323c93a0c0SQais Yousef 20333c93a0c0SQais Yousef const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq); 20343c93a0c0SQais Yousef const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); 20353c93a0c0SQais Yousef const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); 20363c93a0c0SQais Yousef 20373c93a0c0SQais Yousef int sched_trace_rq_cpu(struct rq *rq); 20383c93a0c0SQais Yousef 20393c93a0c0SQais Yousef const struct cpumask *sched_trace_rd_span(struct root_domain *rd); 20403c93a0c0SQais Yousef 20411da177e4SLinus Torvalds #endif 2042