/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/kcsan.h>
#include <asm/kmap_size.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED			0x0040
#define TASK_DEAD			0x0080
#define TASK_WAKEKILL			0x0100
#define TASK_WAKING			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_running(task)		(READ_ONCE((task)->__state) == TASK_RUNNING)

#define task_is_traced(task)		((READ_ONCE(task->__state) & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
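/*
 * For instance, a task sleeping in TASK_KILLABLE is woken only by fatal
 * signals, while TASK_INTERRUPTIBLE reacts to any signal. A minimal
 * sketch of a killable sleep (illustrative only; the full wait-loop
 * pattern and its ordering rules are documented with
 * set_current_state() below):
 *
 *	set_current_state(TASK_KILLABLE);
 *	if (!CONDITION)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 */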
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state)				\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value)			\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		WRITE_ONCE(current->__state, (state_value));	\
	} while (0)

#define set_current_state(state_value)				\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->__state, (state_value));	\
	} while (0)

#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->task_state_change = _THIS_IP_;			\
		WRITE_ONCE(current->__state, (state_value));		\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (CONDITION)
 *	   break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * CONDITION test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   CONDITION = 1;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
 * accessing p->state.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
 * a problem either because that will result in one extra go around the loop
 * and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)				\
	WRITE_ONCE(current->__state, (state_value))

#define set_current_state(state_value)					\
	smp_store_mb(current->__state, (state_value))

/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		WRITE_ONCE(current->__state, (state_value));		\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

#endif

#define get_current_state()	READ_ONCE(current->__state)

/* Task command name length: */
#define TASK_COMM_LEN			16

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
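/*
 * A minimal sketch of a bounded sleep built from the primitives above
 * (timeouts are in jiffies; msecs_to_jiffies() is the usual conversion
 * helper):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (remaining)
 *		;	// woken early (e.g. by a signal); 0 means it timed out
 *
 * The schedule_timeout_interruptible()/_killable()/_uninterruptible()
 * variants fold the set_current_state() call into the helper itself.
 */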
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime:	time spent in user mode
 * @stime:	time spent in system mode
 * @lock:	protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task is idle: */
	VTIME_IDLE,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs as a guest in a CPU with VTIME active: */
	VTIME_GUEST,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	unsigned int		cpu;
	u64			utime;
	u64			stime;
	u64			gtime;
};

/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};
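/*
 * From userspace, per-task clamps are requested via sched_setattr(2); a
 * rough sketch (field and flag names per the uapi struct sched_attr,
 * values in the [0, SCHED_CAPACITY_SCALE] range):
 *
 *	attr.sched_flags    = SCHED_FLAG_UTIL_CLAMP_MIN |
 *			      SCHED_FLAG_UTIL_CLAMP_MAX;
 *	attr.sched_util_min = 128;	// boost: at least ~12.5% of capacity
 *	attr.sched_util_max = 512;	// cap:   at most  ~50%  of capacity
 *	sched_setattr(pid, &attr, 0);
 */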
#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;
#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

/* Increase resolution of cpu_capacity calculations */
# define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)

struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};
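/*
 * With SCHED_FIXEDPOINT_SHIFT == 10, one "unit" is 1024, so e.g. 50% of
 * CPU capacity is SCHED_CAPACITY_SCALE / 2 == 512. In load_weight,
 * inv_weight caches an inverse so hot paths can multiply-and-shift
 * instead of divide; a sketch of the idea (the real helpers live in
 * kernel/sched/):
 *
 *	inv_weight   = 0xffffffff / weight;		// ~2^32 / weight
 *	x_div_weight = ((u64)x * inv_weight) >> 32;	// ~ x / weight
 */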
/**
 * struct util_est - Estimated utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 *            utilization of a task
 *
 * Support data structure to track an Exponential Weighted Moving Average
 * (EWMA) of a FAIR task's utilization. New samples are added to the moving
 * average each time a task completes an activation. The sample's weight is
 * chosen so that the EWMA will be relatively insensitive to transient changes
 * to the task's workload.
 *
 * The enqueued attribute has a slightly different meaning for tasks and cpus:
 * - task:   the task's util_avg at last task dequeue time
 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
 * Thus, the util_est.enqueued of a task represents the contribution to the
 * estimated utilization of the CPU where that task is currently enqueued.
 *
 * Only for tasks do we track a moving average of the past instantaneous
 * estimated utilization. This allows us to absorb sporadic drops in the
 * utilization of an otherwise almost periodic task.
 *
 * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
 * updates. When a task is dequeued, its util_est should not be updated if its
 * util_avg has not been updated in the meantime.
 * This information is mapped into the MSB of util_est.enqueued at dequeue
 * time. Since the max value of util_est.enqueued for a task is 1024 (the PELT
 * util_avg for a task), it is safe to use the MSB.
 */
struct util_est {
	unsigned int			enqueued;
	unsigned int			ewma;
#define UTIL_EST_WEIGHT_SHIFT		2
#define UTIL_AVG_UNCHANGED		0x80000000
} __attribute__((__aligned__(sizeof(u64))));
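/*
 * The EWMA update reduces to shifts and adds; a sketch of the arithmetic
 * (the real update lives in kernel/sched/fair.c), with
 * UTIL_EST_WEIGHT_SHIFT == 2 giving each new sample a 1/4 weight:
 *
 *	ewma += ((int)enqueued - (int)ewma) >> UTIL_EST_WEIGHT_SHIFT;
 */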
/*
 * The load/runnable/util_avg accumulates an infinite geometric series
 * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * [runnable_avg definition]
 *
 *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where runnable% is the time ratio that a sched_entity is runnable and
 * running% the time ratio that a sched_entity is running.
 *
 * For cfs_rq, they are the aggregated values of all runnable and blocked
 * sched_entities.
 *
 * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
 * capacity scaling. The scaling is done through the rq_clock_pelt that is used
 * for computing those signals (see update_rq_clock_pelt()).
 *
 * N.B., the above ratios (runnable% and running%) themselves are in the
 * range of [0, 1]. To do fixed point arithmetics, we therefore scale them
 * to as large a range as necessary. This is for example reflected by
 * util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *	Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u64				runnable_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			runnable_avg;
	unsigned long			util_avg;
	struct util_est			util_est;
} ____cacheline_aligned;
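/*
 * Worked example of the geometric series: the PELT decay factor y is
 * chosen such that y^32 = 1/2, so an entity that is always running
 * accumulates at most
 *
 *	util_sum = 1024 * (1 + y + y^2 + ...) = 1024 / (1 - y) ~= 47742
 *
 * which is where the 47742 in the overflow analysis above comes from;
 * util_avg itself then saturates at SCHED_CAPACITY_SCALE (1024).
 */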
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

	struct sched_statistics		statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
	/* cached value of my_q->h_nr_running */
	unsigned long			runnable_weight;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into a separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;
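/*
 * An RT entity is selected by policy and priority rather than by
 * vruntime; a minimal sketch of making a task SCHED_FIFO from kernel
 * code, using the sched_setscheduler() API declared later in this
 * header (userspace uses the syscall of the same name):
 *
 *	struct sched_param param = { .sched_priority = 10 };
 *
 *	sched_setscheduler(p, SCHED_FIFO, &param);
 */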
struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(); they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period		*/
	u64				dl_density;	/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI (deadline
	 * inheritance). If so we are outside the bandwidth enforcement
	 * mechanism (but only until we exit the critical section);
	 *
	 * @dl_yielded tells if the task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */
	unsigned int			dl_throttled      : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;

#ifdef CONFIG_RT_MUTEXES
	/*
	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
	 * pi_se points to the donor, otherwise points to the dl_se it belongs
	 * to (the original one/itself).
	 */
	struct sched_dl_entity		*pi_se;
#endif
};
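/*
 * Sketch of the corresponding userspace setup for a periodic task that
 * needs 10ms of CPU every 30ms (values are in nanoseconds; the kernel
 * requires runtime <= deadline <= period):
 *
 *	attr.sched_policy   = SCHED_DEADLINE;
 *	attr.sched_runtime  = 10 * 1000 * 1000;
 *	attr.sched_deadline = 30 * 1000 * 1000;
 *	attr.sched_period   = 30 * 1000 * 1000;
 *	sched_setattr(pid, &attr, 0);
 */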
#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias) */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT

/*
 * Utilization clamp for a scheduling entity
 * @value:		clamp value "assigned" to a se
 * @bucket_id:		bucket index corresponding to the "assigned" value
 * @active:		the se is currently refcounted in a rq's bucket
 * @user_defined:	the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value
 * which is pre-computed and stored to avoid expensive integer divisions from
 * the fast path.
 *
 * The active bit is set whenever a task has got an "effective" value assigned,
 * which can be different from the clamp value "requested" from user-space.
 * This allows us to know that a task is refcounted in the rq's bucket
 * corresponding to the "effective" bucket_id.
 *
 * The user_defined bit is set whenever a task has got a task-specific clamp
 * value requested from userspace, i.e. the system defaults apply to this task
 * just as a restriction. This allows relaxing the default clamps when a less
 * restrictive task-specific value has been requested, thus allowing us to
 * implement a "nice" semantic. For example, a task running with a 20%
 * default boost can still drop its own boosting to 0%.
 */
struct uclamp_se {
	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
	unsigned int active		: 1;
	unsigned int user_defined	: 1;
};
#endif /* CONFIG_UCLAMP_TASK */

union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_hint; /* Hint for performance. */
		u8			need_mb; /* Readers need smp_mb(). */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};
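/*
 * wake_q_node threads a task onto a lock-free wakeup queue so that
 * wakeups can be issued after a lock has been dropped; a minimal sketch
 * of the pattern (helpers are declared in <linux/sched/wake_q.h>):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&lock);
 *	wake_q_add(&wake_q, task);
 *	raw_spin_unlock(&lock);
 *	wake_up_q(&wake_q);
 */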
struct kmap_ctrl {
#ifdef CONFIG_KMAP_LOCAL
	int				idx;
	pte_t				pteval[KM_MAX_IDX];
#endif
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	unsigned int			__state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void				*stack;
	refcount_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	int				on_cpu;
	struct __call_single_node	wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int			cpu;
#endif
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	/*
	 * recent_used_cpu is initially set as the last CPU used by a task
	 * that wakes affine another task. Waker/wakee relationships can
	 * push tasks around a CPU where each wakeup moves to the next one.
	 * Tracking a recently used CPU allows a quick search for a recently
	 * used CPU that may be idle.
	 */
	int				recent_used_cpu;
	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
	struct sched_dl_entity		dl;

#ifdef CONFIG_SCHED_CORE
	struct rb_node			core_node;
	unsigned long			core_cookie;
	unsigned int			core_occupation;
#endif

#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif

#ifdef CONFIG_UCLAMP_TASK
	/*
	 * Clamp values requested for a scheduling entity.
	 * Must be updated with task_rq_lock() held.
	 */
	struct uclamp_se		uclamp_req[UCLAMP_CNT];
	/*
	 * Effective clamp values used for a scheduling entity.
	 * Must be updated with task_rq_lock() held.
	 */
	struct uclamp_se		uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	int				nr_cpus_allowed;
	const cpumask_t			*cpus_ptr;
	cpumask_t			*user_cpus_ptr;
	cpumask_t			cpus_mask;
	void				*migration_pending;
#ifdef CONFIG_SMP
	unsigned short			migration_disabled;
#endif
	unsigned short			migration_flags;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_TRACE_RCU
	int				trc_reader_nesting;
	int				trc_ipi_to_cpu;
	union rcu_special		trc_reader_special;
	bool				trc_reader_checked;
	struct list_head		trc_holdout_list;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
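	/*
	 * The *_nesting counters above track the depth of nested RCU
	 * read-side critical sections on a preemptible kernel; e.g. a
	 * reader section such as:
	 *
	 *	rcu_read_lock();
	 *	p = rcu_dereference(ptr);
	 *	...
	 *	rcu_read_unlock();
	 *
	 * bumps rcu_read_lock_nesting on entry and drops it on exit, with
	 * rcu_read_unlock_special flagging any deferred work (e.g. when
	 * the reader was preempted or blocked).
	 */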

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	/* Per-thread vma caching: */
	struct vmacache			vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat		rss_stat;
#endif
	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
#ifdef CONFIG_PSI
	unsigned			sched_psi_wake_requeue:1;
#endif

	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/*
	 * This field must not be in the scheduler word above due to wakelist
	 * queueing no longer being serialized by p->on_cpu. However:
	 *
	 * p->XXX = X;			ttwu()
	 * schedule()			  if (p->on_rq && ..) // false
	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
	 *   deactivate_task()		      ttwu_queue_wakelist())
	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
	 *
	 * guarantees all stores of 'current' are visible before
	 * ->sched_remote_wakeup gets used, so it can be in this word.
	 */
	unsigned			sched_remote_wakeup:1;

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
	/* task is frozen/stopped (used by the cgroup freezer) */
	unsigned			frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	unsigned			use_memdelay:1;
#endif
#ifdef CONFIG_PSI
	/* Stalled due to lack of memory */
	unsigned			in_memstall:1;
#endif
#ifdef CONFIG_PAGE_OWNER
	/* Used by page_owner=on to detect recursion in page tracking. */
	unsigned			in_page_owner:1;
#endif
#ifdef CONFIG_EVENTFD
	/* Recursion prevention for eventfd_signal() */
	unsigned			in_eventfd_signal:1;
#endif

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid			*thread_pid;
	struct hlist_node		pid_links[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	/* PF_IO_WORKER */
	void				*pf_io_worker;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				start_boottime;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
	struct posix_cputimers		posix_cputimers;

#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
	struct posix_cputimers_work	posix_cputimers_work;
#endif

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

#ifdef CONFIG_KEYS
	/* Cached requested key. */
	struct key			*cached_requested_key;
#endif

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized by setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long			last_switch_count;
	unsigned long			last_switch_time;
#endif
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

#ifdef CONFIG_IO_URING
	struct io_uring_task		*io_uring;
#endif

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct __rcu	*sighand;
	struct sigqueue			*sigqueue_cache;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
	struct audit_context		*audit_context;
#endif
	kuid_t				loginuid;
	unsigned int			sessionid;
#endif
	struct seccomp			seccomp;
	struct syscall_user_dispatch	syscall_dispatch;

	/* Thread group tracking: */
	u64				parent_exec_id;
	u64				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached		pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	int				non_block_count;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	struct irqtrace_events		irqtrace;
	unsigned int			hardirq_threaded;
	u64				hardirq_chain_key;
	int				softirqs_enabled;
	int				softirq_context;
	int				irq_config;
#endif
#ifdef CONFIG_PREEMPT_RT
	int				softirq_disable_cnt;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
#endif

#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
	unsigned int			in_ubsan;
#endif

	/* Journalling filesystem info: */
	void				*journal_info;

	/* Stacked block device info: */
	struct bio_list			*bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug			*plug;
#endif

	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;

	struct io_context		*io_context;

#ifdef CONFIG_COMPACTION
	struct capture_control		*capture_control;
#endif
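	/*
	 * The fields guarded by alloc_lock above are normally taken via
	 * the task_lock()/task_unlock() helpers from <linux/sched/task.h>;
	 * a minimal sketch of reading ->comm safely:
	 *
	 *	task_lock(p);
	 *	strscpy(buf, p->comm, sizeof(buf));
	 *	task_unlock(p);
	 *
	 * (get_task_comm() wraps this kind of access.)
	 */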
11131da177e4SLinus Torvalds unsigned long ptrace_message; 1114ae7795bcSEric W. Biederman kernel_siginfo_t *last_siginfo; 11155eca1c10SIngo Molnar 11167c3ab738SAndrew Morton struct task_io_accounting ioac; 1117eb414681SJohannes Weiner #ifdef CONFIG_PSI 1118eb414681SJohannes Weiner /* Pressure stall state */ 1119eb414681SJohannes Weiner unsigned int psi_flags; 1120eb414681SJohannes Weiner #endif 11215eca1c10SIngo Molnar #ifdef CONFIG_TASK_XACCT 11225eca1c10SIngo Molnar /* Accumulated RSS usage: */ 11235eca1c10SIngo Molnar u64 acct_rss_mem1; 11245eca1c10SIngo Molnar /* Accumulated virtual memory usage: */ 11255eca1c10SIngo Molnar u64 acct_vm_mem1; 11265eca1c10SIngo Molnar /* stime + utime since last update: */ 11275eca1c10SIngo Molnar u64 acct_timexpd; 11281da177e4SLinus Torvalds #endif 11291da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS 11305eca1c10SIngo Molnar /* Protected by ->alloc_lock: */ 11315eca1c10SIngo Molnar nodemask_t mems_allowed; 11323b03706fSIngo Molnar /* Sequence number to catch updates: */ 1133b7505861SAhmed S. Darwish seqcount_spinlock_t mems_allowed_seq; 1134825a46afSPaul Jackson int cpuset_mem_spread_rotor; 11356adef3ebSJack Steiner int cpuset_slab_spread_rotor; 11361da177e4SLinus Torvalds #endif 1137ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS 11385eca1c10SIngo Molnar /* Control Group info protected by css_set_lock: */ 11392c392b8cSArnd Bergmann struct css_set __rcu *cgroups; 11405eca1c10SIngo Molnar /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 1141817929ecSPaul Menage struct list_head cg_list; 1142ddbcc7e8SPaul Menage #endif 1143e6d42931SJohannes Weiner #ifdef CONFIG_X86_CPU_RESCTRL 11440734ded1SVikas Shivappa u32 closid; 1145d6aaba61SVikas Shivappa u32 rmid; 1146e02737d5SFenghua Yu #endif 114742b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX 11480771dfefSIngo Molnar struct robust_list_head __user *robust_list; 114934f192c6SIngo Molnar #ifdef CONFIG_COMPAT 115034f192c6SIngo Molnar struct compat_robust_list_head __user *compat_robust_list; 115134f192c6SIngo Molnar #endif 1152c87e2837SIngo Molnar struct list_head pi_state_list; 1153c87e2837SIngo Molnar struct futex_pi_state *pi_state_cache; 11543f186d97SThomas Gleixner struct mutex futex_exit_mutex; 11553d4775dfSThomas Gleixner unsigned int futex_state; 115642b2dd0aSAlexey Dobriyan #endif 1157cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS 11588dc85d54SPeter Zijlstra struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; 1159cdd6c482SIngo Molnar struct mutex perf_event_mutex; 1160cdd6c482SIngo Molnar struct list_head perf_event_list; 1161a63eaf34SPaul Mackerras #endif 11628f47b187SThomas Gleixner #ifdef CONFIG_DEBUG_PREEMPT 11638f47b187SThomas Gleixner unsigned long preempt_disable_ip; 11648f47b187SThomas Gleixner #endif 1165c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA 11665eca1c10SIngo Molnar /* Protected by alloc_lock: */ 11675eca1c10SIngo Molnar struct mempolicy *mempolicy; 116845816682SVlastimil Babka short il_prev; 1169207205a2SEric Dumazet short pref_node_fork; 1170c7aceabaSRichard Kennedy #endif 1171cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING 1172cbee9f88SPeter Zijlstra int numa_scan_seq; 1173cbee9f88SPeter Zijlstra unsigned int numa_scan_period; 1174598f0ec0SMel Gorman unsigned int numa_scan_period_max; 1175de1c9ce6SRik van Riel int numa_preferred_nid; 11766b9a7460SMel Gorman unsigned long numa_migrate_retry; 11775eca1c10SIngo Molnar /* Migration stamp: */ 11785eca1c10SIngo Molnar u64 node_stamp; 11797e2703e6SRik van Riel u64 last_task_numa_placement; 11807e2703e6SRik van Riel u64 
last_sum_exec_runtime; 1181cbee9f88SPeter Zijlstra struct callback_head numa_work; 1182f809ca9aSMel Gorman 1183cb361d8cSJann Horn /* 1184cb361d8cSJann Horn * This pointer is only modified for current in syscall and 1185cb361d8cSJann Horn * pagefault context (and for tasks being destroyed), so it can be read 1186cb361d8cSJann Horn * from any of the following contexts: 1187cb361d8cSJann Horn * - RCU read-side critical section 1188cb361d8cSJann Horn * - current->numa_group from everywhere 1189cb361d8cSJann Horn * - task's runqueue locked, task not running 1190cb361d8cSJann Horn */ 1191cb361d8cSJann Horn struct numa_group __rcu *numa_group; 11928c8a743cSPeter Zijlstra 1193745d6147SMel Gorman /* 119444dba3d5SIulia Manda * numa_faults is an array split into four regions: 119544dba3d5SIulia Manda * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer 119644dba3d5SIulia Manda * in this precise order. 119744dba3d5SIulia Manda * 119844dba3d5SIulia Manda * faults_memory: Exponential decaying average of faults on a per-node 119944dba3d5SIulia Manda * basis. Scheduling placement decisions are made based on these 120044dba3d5SIulia Manda * counts. The values remain static for the duration of a PTE scan. 120144dba3d5SIulia Manda * faults_cpu: Track the nodes the process was running on when a NUMA 120244dba3d5SIulia Manda * hinting fault was incurred. 120344dba3d5SIulia Manda * faults_memory_buffer and faults_cpu_buffer: Record faults per node 120444dba3d5SIulia Manda * during the current scan window. When the scan completes, the counts 120544dba3d5SIulia Manda * in faults_memory and faults_cpu decay and these values are copied. 1206745d6147SMel Gorman */ 120744dba3d5SIulia Manda unsigned long *numa_faults; 120883e1d2cdSMel Gorman unsigned long total_numa_faults; 1209745d6147SMel Gorman 1210745d6147SMel Gorman /* 121104bb2f94SRik van Riel * numa_faults_locality tracks if faults recorded during the last 1212074c2381SMel Gorman * scan window were remote/local or failed to migrate. The task scan 1213074c2381SMel Gorman * period is adapted based on the locality of the faults with different 1214074c2381SMel Gorman * weights depending on whether they were shared or private faults 121504bb2f94SRik van Riel */ 1216074c2381SMel Gorman unsigned long numa_faults_locality[3]; 121704bb2f94SRik van Riel 1218b32e86b4SIngo Molnar unsigned long numa_pages_migrated; 1219cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */ 1220cbee9f88SPeter Zijlstra 1221d7822b1eSMathieu Desnoyers #ifdef CONFIG_RSEQ 1222d7822b1eSMathieu Desnoyers struct rseq __user *rseq; 1223d7822b1eSMathieu Desnoyers u32 rseq_sig; 1224d7822b1eSMathieu Desnoyers /* 1225d7822b1eSMathieu Desnoyers * RmW on rseq_event_mask must be performed atomically 1226d7822b1eSMathieu Desnoyers * with respect to preemption. 1227d7822b1eSMathieu Desnoyers */ 1228d7822b1eSMathieu Desnoyers unsigned long rseq_event_mask; 1229d7822b1eSMathieu Desnoyers #endif 1230d7822b1eSMathieu Desnoyers 123172b252aeSMel Gorman struct tlbflush_unmap_batch tlb_ubc; 123272b252aeSMel Gorman 12333fbd7ee2SEric W. Biederman union { 12343fbd7ee2SEric W. Biederman refcount_t rcu_users; 1235e56d0903SIngo Molnar struct rcu_head rcu; 12363fbd7ee2SEric W. 
Biederman }; 1237b92ce558SJens Axboe 12385eca1c10SIngo Molnar /* Cache last used pipe for splice(): */ 1239b92ce558SJens Axboe struct pipe_inode_info *splice_pipe; 12405640f768SEric Dumazet 12415640f768SEric Dumazet struct page_frag task_frag; 12425640f768SEric Dumazet 1243ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT 1244ca74e92bSShailabh Nagar struct task_delay_info *delays; 1245ca74e92bSShailabh Nagar #endif 124647913d4eSIngo Molnar 1247f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION 1248f4f154fdSAkinobu Mita int make_it_fail; 12499049f2f6SAkinobu Mita unsigned int fail_nth; 1250f4f154fdSAkinobu Mita #endif 12519d823e8fSWu Fengguang /* 12525eca1c10SIngo Molnar * When (nr_dirtied >= nr_dirtied_pause), it's time to call 12535eca1c10SIngo Molnar * balance_dirty_pages() for a dirty throttling pause: 12549d823e8fSWu Fengguang */ 12559d823e8fSWu Fengguang int nr_dirtied; 12569d823e8fSWu Fengguang int nr_dirtied_pause; 12575eca1c10SIngo Molnar /* Start of a write-and-pause period: */ 12585eca1c10SIngo Molnar unsigned long dirty_paused_when; 12599d823e8fSWu Fengguang 12609745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP 12619745512cSArjan van de Ven int latency_record_count; 12629745512cSArjan van de Ven struct latency_record latency_record[LT_SAVECOUNT]; 12639745512cSArjan van de Ven #endif 12646976675dSArjan van de Ven /* 12655eca1c10SIngo Molnar * Time slack values; these are used to round up poll() and 12666976675dSArjan van de Ven * select() etc timeout values. These are in nanoseconds. 12676976675dSArjan van de Ven */ 1268da8b44d5SJohn Stultz u64 timer_slack_ns; 1269da8b44d5SJohn Stultz u64 default_timer_slack_ns; 1270f8d570a4SDavid Miller 1271d73b4936SAndrey Konovalov #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 12720b24beccSAndrey Ryabinin unsigned int kasan_depth; 12730b24beccSAndrey Ryabinin #endif 127492c209acSMarco Elver 1275dfd402a4SMarco Elver #ifdef CONFIG_KCSAN 1276dfd402a4SMarco Elver struct kcsan_ctx kcsan_ctx; 127792c209acSMarco Elver #ifdef CONFIG_TRACE_IRQFLAGS 127892c209acSMarco Elver struct irqtrace_events kcsan_save_irqtrace; 127992c209acSMarco Elver #endif 1280dfd402a4SMarco Elver #endif 12815eca1c10SIngo Molnar 1282393824f6SPatricia Alfonso #if IS_ENABLED(CONFIG_KUNIT) 1283393824f6SPatricia Alfonso struct kunit *kunit_test; 1284393824f6SPatricia Alfonso #endif 1285393824f6SPatricia Alfonso 1286fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER 12875eca1c10SIngo Molnar /* Index of current stored address in ret_stack: */ 1288f201ae23SFrederic Weisbecker int curr_ret_stack; 128939eb456dSSteven Rostedt (VMware) int curr_ret_depth; 12905eca1c10SIngo Molnar 12915eca1c10SIngo Molnar /* Stack of return addresses for return function tracing: */ 1292f201ae23SFrederic Weisbecker struct ftrace_ret_stack *ret_stack; 12935eca1c10SIngo Molnar 12945eca1c10SIngo Molnar /* Timestamp for last schedule: */ 12958aef2d28SSteven Rostedt unsigned long long ftrace_timestamp; 12965eca1c10SIngo Molnar 1297f201ae23SFrederic Weisbecker /* 1298f201ae23SFrederic Weisbecker * Number of functions that haven't been traced 12995eca1c10SIngo Molnar * because of depth overrun: 1300f201ae23SFrederic Weisbecker */ 1301f201ae23SFrederic Weisbecker atomic_t trace_overrun; 13025eca1c10SIngo Molnar 13035eca1c10SIngo Molnar /* Pause tracing: */ 1304380c4b14SFrederic Weisbecker atomic_t tracing_graph_pause; 1305f201ae23SFrederic Weisbecker #endif 13065eca1c10SIngo Molnar 1307ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING 13085eca1c10SIngo Molnar /* State flags for 
use by tracers: */ 1309ea4e2bc4SSteven Rostedt unsigned long trace; 13105eca1c10SIngo Molnar 13115eca1c10SIngo Molnar /* Bitmask and counter of trace recursion: */ 1312261842b7SSteven Rostedt unsigned long trace_recursion; 1313261842b7SSteven Rostedt #endif /* CONFIG_TRACING */ 13145eca1c10SIngo Molnar 13155c9a8750SDmitry Vyukov #ifdef CONFIG_KCOV 1316eec028c9SAndrey Konovalov /* See kernel/kcov.c for more details. */ 1317eec028c9SAndrey Konovalov 13185eca1c10SIngo Molnar /* Coverage collection mode enabled for this task (0 if disabled): */ 13190ed557aaSMark Rutland unsigned int kcov_mode; 13205eca1c10SIngo Molnar 13215eca1c10SIngo Molnar /* Size of the kcov_area: */ 13225eca1c10SIngo Molnar unsigned int kcov_size; 13235eca1c10SIngo Molnar 13245eca1c10SIngo Molnar /* Buffer for coverage collection: */ 13255c9a8750SDmitry Vyukov void *kcov_area; 13265eca1c10SIngo Molnar 13275eca1c10SIngo Molnar /* KCOV descriptor wired with this task or NULL: */ 13285c9a8750SDmitry Vyukov struct kcov *kcov; 1329eec028c9SAndrey Konovalov 1330eec028c9SAndrey Konovalov /* KCOV common handle for remote coverage collection: */ 1331eec028c9SAndrey Konovalov u64 kcov_handle; 1332eec028c9SAndrey Konovalov 1333eec028c9SAndrey Konovalov /* KCOV sequence number: */ 1334eec028c9SAndrey Konovalov int kcov_sequence; 13355ff3b30aSAndrey Konovalov 13365ff3b30aSAndrey Konovalov /* Collect coverage from softirq context: */ 13375ff3b30aSAndrey Konovalov unsigned int kcov_softirq; 13385c9a8750SDmitry Vyukov #endif 13395eca1c10SIngo Molnar 13406f185c29SVladimir Davydov #ifdef CONFIG_MEMCG 1341626ebc41STejun Heo struct mem_cgroup *memcg_in_oom; 1342626ebc41STejun Heo gfp_t memcg_oom_gfp_mask; 1343626ebc41STejun Heo int memcg_oom_order; 1344b23afb93STejun Heo 13455eca1c10SIngo Molnar /* Number of pages to reclaim on returning to userland: */ 1346b23afb93STejun Heo unsigned int memcg_nr_pages_over_high; 1347d46eb14bSShakeel Butt 1348d46eb14bSShakeel Butt /* Used by memcontrol for targeted memcg charge: */ 1349d46eb14bSShakeel Butt struct mem_cgroup *active_memcg; 1350569b846dSKAMEZAWA Hiroyuki #endif 13515eca1c10SIngo Molnar 1352d09d8df3SJosef Bacik #ifdef CONFIG_BLK_CGROUP 1353d09d8df3SJosef Bacik struct request_queue *throttle_queue; 1354d09d8df3SJosef Bacik #endif 1355d09d8df3SJosef Bacik 13560326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES 13570326f5a9SSrikar Dronamraju struct uprobe_task *utask; 13580326f5a9SSrikar Dronamraju #endif 1359cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) 1360cafe5635SKent Overstreet unsigned int sequential_io; 1361cafe5635SKent Overstreet unsigned int sequential_io_avg; 1362cafe5635SKent Overstreet #endif 13635fbda3ecSThomas Gleixner struct kmap_ctrl kmap_ctrl; 13648eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP 13658eb23b9fSPeter Zijlstra unsigned long task_state_change; 13668eb23b9fSPeter Zijlstra #endif 13678bcbde54SDavid Hildenbrand int pagefault_disabled; 136803049269SMichal Hocko #ifdef CONFIG_MMU 136929c696e1SVladimir Davydov struct task_struct *oom_reaper_list; 137003049269SMichal Hocko #endif 1371ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK 1372ba14a194SAndy Lutomirski struct vm_struct *stack_vm_area; 1373ba14a194SAndy Lutomirski #endif 137468f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 13755eca1c10SIngo Molnar /* A live task holds one reference: */ 1376f0b89d39SElena Reshetova refcount_t stack_refcount; 137768f24b08SAndy Lutomirski #endif 1378d83a7cb3SJosh Poimboeuf #ifdef CONFIG_LIVEPATCH 1379d83a7cb3SJosh Poimboeuf int 
patch_state; 1380d83a7cb3SJosh Poimboeuf #endif 1381e4e55b47STetsuo Handa #ifdef CONFIG_SECURITY 1382e4e55b47STetsuo Handa /* Used by LSM modules for access restriction: */ 1383e4e55b47STetsuo Handa void *security; 1384e4e55b47STetsuo Handa #endif 1385a10787e6SSong Liu #ifdef CONFIG_BPF_SYSCALL 1386a10787e6SSong Liu /* Used by BPF task local storage */ 1387a10787e6SSong Liu struct bpf_local_storage __rcu *bpf_storage; 1388a10787e6SSong Liu #endif 138929e48ce8SKees Cook 1390afaef01cSAlexander Popov #ifdef CONFIG_GCC_PLUGIN_STACKLEAK 1391afaef01cSAlexander Popov unsigned long lowest_stack; 1392c8d12627SAlexander Popov unsigned long prev_lowest_stack; 1393afaef01cSAlexander Popov #endif 1394afaef01cSAlexander Popov 13955567d11cSPeter Zijlstra #ifdef CONFIG_X86_MCE 1396c0ab7ffcSTony Luck void __user *mce_vaddr; 1397c0ab7ffcSTony Luck __u64 mce_kflags; 13985567d11cSPeter Zijlstra u64 mce_addr; 139917fae129STony Luck __u64 mce_ripv : 1, 140017fae129STony Luck mce_whole_page : 1, 140117fae129STony Luck __mce_reserved : 62; 14025567d11cSPeter Zijlstra struct callback_head mce_kill_me; 14035567d11cSPeter Zijlstra #endif 14045567d11cSPeter Zijlstra 1405d741bf41SPeter Zijlstra #ifdef CONFIG_KRETPROBES 1406d741bf41SPeter Zijlstra struct llist_head kretprobe_instances; 1407d741bf41SPeter Zijlstra #endif 1408d741bf41SPeter Zijlstra 140929e48ce8SKees Cook /* 141029e48ce8SKees Cook * New fields for task_struct should be added above here, so that 141129e48ce8SKees Cook * they are included in the randomized portion of task_struct. 141229e48ce8SKees Cook */ 141329e48ce8SKees Cook randomized_struct_fields_end 141429e48ce8SKees Cook 14155eca1c10SIngo Molnar /* CPU-specific state of this task: */ 14160c8c0f03SDave Hansen struct thread_struct thread; 14175eca1c10SIngo Molnar 14180c8c0f03SDave Hansen /* 14190c8c0f03SDave Hansen * WARNING: on x86, 'thread_struct' contains a variable-sized 14200c8c0f03SDave Hansen * structure. It *MUST* be at the end of 'task_struct'. 14210c8c0f03SDave Hansen * 14220c8c0f03SDave Hansen * Do not put anything below here! 14230c8c0f03SDave Hansen */ 14241da177e4SLinus Torvalds }; 14251da177e4SLinus Torvalds 1426e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task) 142722c935f4SEric W. Biederman { 14282c470475SEric W. Biederman return task->thread_pid; 142922c935f4SEric W. Biederman } 143022c935f4SEric W. Biederman 14317af57294SPavel Emelyanov /* 14327af57294SPavel Emelyanov * the helpers to get the task's different pids as they are seen 14337af57294SPavel Emelyanov * from various namespaces 14347af57294SPavel Emelyanov * 14357af57294SPavel Emelyanov * task_xid_nr() : global id, i.e. the id seen from the init namespace; 143644c4e1b2SEric W. Biederman * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of 143744c4e1b2SEric W. Biederman * current. 
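 *                       (equivalent to task_xid_nr_ns(task, task_active_pid_ns(current)))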
14387af57294SPavel Emelyanov * task_xid_nr_ns() : id seen from the ns specified; 14397af57294SPavel Emelyanov * 14407af57294SPavel Emelyanov * see also pid_nr() etc in include/linux/pid.h 14417af57294SPavel Emelyanov */ 14425eca1c10SIngo Molnar pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); 14437af57294SPavel Emelyanov 1444e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk) 14457af57294SPavel Emelyanov { 14467af57294SPavel Emelyanov return tsk->pid; 14477af57294SPavel Emelyanov } 14487af57294SPavel Emelyanov 14495eca1c10SIngo Molnar static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 145052ee2dfdSOleg Nesterov { 145152ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); 145252ee2dfdSOleg Nesterov } 14537af57294SPavel Emelyanov 14547af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk) 14557af57294SPavel Emelyanov { 145652ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); 14577af57294SPavel Emelyanov } 14587af57294SPavel Emelyanov 14597af57294SPavel Emelyanov 1460e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk) 14617af57294SPavel Emelyanov { 14627af57294SPavel Emelyanov return tsk->tgid; 14637af57294SPavel Emelyanov } 14647af57294SPavel Emelyanov 14655eca1c10SIngo Molnar /** 14665eca1c10SIngo Molnar * pid_alive - check that a task structure is not stale 14675eca1c10SIngo Molnar * @p: Task structure to be checked. 14685eca1c10SIngo Molnar * 14695eca1c10SIngo Molnar * Test if a process is not yet dead (at most zombie state) 14705eca1c10SIngo Molnar * If pid_alive fails, then pointers within the task structure 14715eca1c10SIngo Molnar * can be stale and must not be dereferenced. 14725eca1c10SIngo Molnar * 14735eca1c10SIngo Molnar * Return: 1 if the process is alive. 0 otherwise. 14745eca1c10SIngo Molnar */ 14755eca1c10SIngo Molnar static inline int pid_alive(const struct task_struct *p) 14765eca1c10SIngo Molnar { 14772c470475SEric W. Biederman return p->thread_pid != NULL; 14785eca1c10SIngo Molnar } 14797af57294SPavel Emelyanov 14805eca1c10SIngo Molnar static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 14817af57294SPavel Emelyanov { 148252ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); 14837af57294SPavel Emelyanov } 14847af57294SPavel Emelyanov 14857af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 14867af57294SPavel Emelyanov { 148752ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); 14887af57294SPavel Emelyanov } 14897af57294SPavel Emelyanov 14907af57294SPavel Emelyanov 14915eca1c10SIngo Molnar static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 14927af57294SPavel Emelyanov { 149352ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); 14947af57294SPavel Emelyanov } 14957af57294SPavel Emelyanov 14967af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk) 14977af57294SPavel Emelyanov { 149852ee2dfdSOleg Nesterov return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); 14997af57294SPavel Emelyanov } 15007af57294SPavel Emelyanov 1501dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 1502dd1c1f2fSOleg Nesterov { 15036883f81aSEric W. 
Biederman return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); 1504dd1c1f2fSOleg Nesterov } 1505dd1c1f2fSOleg Nesterov 1506dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_vnr(struct task_struct *tsk) 1507dd1c1f2fSOleg Nesterov { 15086883f81aSEric W. Biederman return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); 1509dd1c1f2fSOleg Nesterov } 1510dd1c1f2fSOleg Nesterov 1511dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) 1512dd1c1f2fSOleg Nesterov { 1513dd1c1f2fSOleg Nesterov pid_t pid = 0; 1514dd1c1f2fSOleg Nesterov 1515dd1c1f2fSOleg Nesterov rcu_read_lock(); 1516dd1c1f2fSOleg Nesterov if (pid_alive(tsk)) 1517dd1c1f2fSOleg Nesterov pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); 1518dd1c1f2fSOleg Nesterov rcu_read_unlock(); 1519dd1c1f2fSOleg Nesterov 1520dd1c1f2fSOleg Nesterov return pid; 1521dd1c1f2fSOleg Nesterov } 1522dd1c1f2fSOleg Nesterov 1523dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr(const struct task_struct *tsk) 1524dd1c1f2fSOleg Nesterov { 1525dd1c1f2fSOleg Nesterov return task_ppid_nr_ns(tsk, &init_pid_ns); 1526dd1c1f2fSOleg Nesterov } 1527dd1c1f2fSOleg Nesterov 15285eca1c10SIngo Molnar /* Obsolete, do not use: */ 15291b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk) 15301b0f7ffdSOleg Nesterov { 15311b0f7ffdSOleg Nesterov return task_pgrp_nr_ns(tsk, &init_pid_ns); 15321b0f7ffdSOleg Nesterov } 15337af57294SPavel Emelyanov 153406eb6184SPeter Zijlstra #define TASK_REPORT_IDLE (TASK_REPORT + 1) 153506eb6184SPeter Zijlstra #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) 153606eb6184SPeter Zijlstra 15371d48b080SPeter Zijlstra static inline unsigned int task_state_index(struct task_struct *tsk) 153820435d84SXie XiuQi { 15392f064a59SPeter Zijlstra unsigned int tsk_state = READ_ONCE(tsk->__state); 15401593baabSPeter Zijlstra unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; 154120435d84SXie XiuQi 154206eb6184SPeter Zijlstra BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); 154306eb6184SPeter Zijlstra 154406eb6184SPeter Zijlstra if (tsk_state == TASK_IDLE) 154506eb6184SPeter Zijlstra state = TASK_REPORT_IDLE; 154606eb6184SPeter Zijlstra 15471593baabSPeter Zijlstra return fls(state); 15481593baabSPeter Zijlstra } 154920435d84SXie XiuQi 15501d48b080SPeter Zijlstra static inline char task_index_to_char(unsigned int state) 15511593baabSPeter Zijlstra { 15528ef9925bSPeter Zijlstra static const char state_char[] = "RSDTtXZPI"; 15531593baabSPeter Zijlstra 155406eb6184SPeter Zijlstra BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); 15551593baabSPeter Zijlstra 15561593baabSPeter Zijlstra return state_char[state]; 15571593baabSPeter Zijlstra } 15581593baabSPeter Zijlstra 15591593baabSPeter Zijlstra static inline char task_state_to_char(struct task_struct *tsk) 15601593baabSPeter Zijlstra { 15611d48b080SPeter Zijlstra return task_index_to_char(task_state_index(tsk)); 156220435d84SXie XiuQi } 156320435d84SXie XiuQi 15641da177e4SLinus Torvalds /** 1565570f5241SSergey Senozhatsky * is_global_init - check if a task structure is init. Since init 1566570f5241SSergey Senozhatsky * is free to have sub-threads we need to check tgid. 15673260259fSHenne * @tsk: Task structure to be checked. 15683260259fSHenne * 15693260259fSHenne * Check if a task structure is the first user space task the kernel created. 1570e69f6186SYacine Belkadi * 1571e69f6186SYacine Belkadi * Return: 1 if the task structure is init. 0 otherwise. 
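 * Note: the comparison is against the global (init-namespace) tgid, so the
 * init task of a child pid namespace is not treated as global init here.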
1572f400e198SSukadev Bhattiprolu */ 1573e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk) 1574b461cc03SPavel Emelyanov { 1575570f5241SSergey Senozhatsky return task_tgid_nr(tsk) == 1; 1576b461cc03SPavel Emelyanov } 1577b460cbc5SSerge E. Hallyn 15789ec52099SCedric Le Goater extern struct pid *cad_pid; 15799ec52099SCedric Le Goater 15801da177e4SLinus Torvalds /* 15811da177e4SLinus Torvalds * Per process flags 15821da177e4SLinus Torvalds */ 158301ccf592SSebastian Andrzej Siewior #define PF_VCPU 0x00000001 /* I'm a virtual CPU */ 1584c1de45caSPeter Zijlstra #define PF_IDLE 0x00000002 /* I am an IDLE thread */ 15855eca1c10SIngo Molnar #define PF_EXITING 0x00000004 /* Getting shut down */ 158601ccf592SSebastian Andrzej Siewior #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ 158721aa9af0STejun Heo #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ 15885eca1c10SIngo Molnar #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ 15895eca1c10SIngo Molnar #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ 15905eca1c10SIngo Molnar #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ 15915eca1c10SIngo Molnar #define PF_DUMPCORE 0x00000200 /* Dumped core */ 15925eca1c10SIngo Molnar #define PF_SIGNALED 0x00000400 /* Killed by a signal */ 15931da177e4SLinus Torvalds #define PF_MEMALLOC 0x00000800 /* Allocating memory */ 15945eca1c10SIngo Molnar #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ 15955eca1c10SIngo Molnar #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ 15965eca1c10SIngo Molnar #define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ 15975eca1c10SIngo Molnar #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ 15985eca1c10SIngo Molnar #define PF_FROZEN 0x00010000 /* Frozen for system suspend */ 15997dea19f9SMichal Hocko #define PF_KSWAPD 0x00020000 /* I am kswapd */ 16007dea19f9SMichal Hocko #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ 16017dea19f9SMichal Hocko #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ 1602a37b0715SNeilBrown #define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, 1603a37b0715SNeilBrown * I am cleaning dirty pages from some other bdi. */ 1604246bb0b1SOleg Nesterov #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 16055eca1c10SIngo Molnar #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1606b31dc66aSJens Axboe #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 16073bd37062SSebastian Andrzej Siewior #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ 16084db96cf0SAndi Kleen #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 16091a08ae36SPavel Tatashin #define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. 
*/ 161058a69cb4STejun Heo #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ 16115eca1c10SIngo Molnar #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ 16121da177e4SLinus Torvalds 16131da177e4SLinus Torvalds /* 16141da177e4SLinus Torvalds * Only the _current_ task can read/write to tsk->flags, but other 16151da177e4SLinus Torvalds * tasks can access tsk->flags in readonly mode for example 16161da177e4SLinus Torvalds * with tsk_used_math (like during threaded core dumping). 16171da177e4SLinus Torvalds * There is however an exception to this rule during ptrace 16181da177e4SLinus Torvalds * or during fork: the ptracer task is allowed to write to the 16191da177e4SLinus Torvalds * child->flags of its traced child (same goes for fork, the parent 16201da177e4SLinus Torvalds * can write to the child->flags), because we're guaranteed the 16211da177e4SLinus Torvalds * child is not running and in turn not changing child->flags 16221da177e4SLinus Torvalds * at the same time the parent does it. 16231da177e4SLinus Torvalds */ 16241da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) 16251da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) 16261da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current) 16271da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current) 16285eca1c10SIngo Molnar 16291da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \ 16301da177e4SLinus Torvalds do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) 16315eca1c10SIngo Molnar 16325eca1c10SIngo Molnar #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) 16335eca1c10SIngo Molnar 16341da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \ 16351da177e4SLinus Torvalds do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) 16365eca1c10SIngo Molnar 16371da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ 16381da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 16391da177e4SLinus Torvalds #define used_math() tsk_used_math(current) 16401da177e4SLinus Torvalds 164162ec05ddSThomas Gleixner static inline bool is_percpu_thread(void) 164262ec05ddSThomas Gleixner { 164362ec05ddSThomas Gleixner #ifdef CONFIG_SMP 164462ec05ddSThomas Gleixner return (current->flags & PF_NO_SETAFFINITY) && 164562ec05ddSThomas Gleixner (current->nr_cpus_allowed == 1); 164662ec05ddSThomas Gleixner #else 164762ec05ddSThomas Gleixner return true; 164862ec05ddSThomas Gleixner #endif 164962ec05ddSThomas Gleixner } 165062ec05ddSThomas Gleixner 16511d4457f9SKees Cook /* Per-process atomic flags. */ 1652a2b86f77SZefan Li #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
*/ 16532ad654bcSZefan Li #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 16542ad654bcSZefan Li #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1655356e4bffSThomas Gleixner #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ 1656356e4bffSThomas Gleixner #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ 16579137bb27SThomas Gleixner #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ 16589137bb27SThomas Gleixner #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ 165971368af9SWaiman Long #define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ 16601d4457f9SKees Cook 1661e0e5070bSZefan Li #define TASK_PFA_TEST(name, func) \ 1662e0e5070bSZefan Li static inline bool task_##func(struct task_struct *p) \ 1663e0e5070bSZefan Li { return test_bit(PFA_##name, &p->atomic_flags); } 16645eca1c10SIngo Molnar 1665e0e5070bSZefan Li #define TASK_PFA_SET(name, func) \ 1666e0e5070bSZefan Li static inline void task_set_##func(struct task_struct *p) \ 1667e0e5070bSZefan Li { set_bit(PFA_##name, &p->atomic_flags); } 16685eca1c10SIngo Molnar 1669e0e5070bSZefan Li #define TASK_PFA_CLEAR(name, func) \ 1670e0e5070bSZefan Li static inline void task_clear_##func(struct task_struct *p) \ 1671e0e5070bSZefan Li { clear_bit(PFA_##name, &p->atomic_flags); } 16721d4457f9SKees Cook 1673e0e5070bSZefan Li TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) 1674e0e5070bSZefan Li TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) 16751d4457f9SKees Cook 16762ad654bcSZefan Li TASK_PFA_TEST(SPREAD_PAGE, spread_page) 16772ad654bcSZefan Li TASK_PFA_SET(SPREAD_PAGE, spread_page) 16782ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) 16792ad654bcSZefan Li 16802ad654bcSZefan Li TASK_PFA_TEST(SPREAD_SLAB, spread_slab) 16812ad654bcSZefan Li TASK_PFA_SET(SPREAD_SLAB, spread_slab) 16822ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 1683544b2c91STejun Heo 1684356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) 1685356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) 1686356e4bffSThomas Gleixner TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) 1687356e4bffSThomas Gleixner 168871368af9SWaiman Long TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) 168971368af9SWaiman Long TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) 169071368af9SWaiman Long TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) 169171368af9SWaiman Long 1692356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1693356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) 1694356e4bffSThomas Gleixner 16959137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) 16969137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) 16979137bb27SThomas Gleixner TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) 16989137bb27SThomas Gleixner 16999137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 17009137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) 17019137bb27SThomas Gleixner 17025eca1c10SIngo Molnar static inline void 1703717a94b5SNeilBrown current_restore_flags(unsigned long orig_flags, unsigned long flags) 1704907aed48SMel Gorman { 1705717a94b5SNeilBrown current->flags &= ~flags; 1706717a94b5SNeilBrown current->flags |= orig_flags & flags; 1707907aed48SMel Gorman } 1708907aed48SMel Gorman 17095eca1c10SIngo Molnar extern int 
cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); 17105eca1c10SIngo Molnar extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); 17111da177e4SLinus Torvalds #ifdef CONFIG_SMP 17125eca1c10SIngo Molnar extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); 17135eca1c10SIngo Molnar extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); 1714b90ca8baSWill Deacon extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node); 1715b90ca8baSWill Deacon extern void release_user_cpus_ptr(struct task_struct *p); 1716234b8ab6SWill Deacon extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); 171707ec77a1SWill Deacon extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); 171807ec77a1SWill Deacon extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); 17191da177e4SLinus Torvalds #else 17205eca1c10SIngo Molnar static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) 17211e1b6c51SKOSAKI Motohiro { 17221e1b6c51SKOSAKI Motohiro } 17235eca1c10SIngo Molnar static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 17241da177e4SLinus Torvalds { 172596f874e2SRusty Russell if (!cpumask_test_cpu(0, new_mask)) 17261da177e4SLinus Torvalds return -EINVAL; 17271da177e4SLinus Torvalds return 0; 17281da177e4SLinus Torvalds } 1729b90ca8baSWill Deacon static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) 1730b90ca8baSWill Deacon { 1731b90ca8baSWill Deacon if (src->user_cpus_ptr) 1732b90ca8baSWill Deacon return -EINVAL; 1733b90ca8baSWill Deacon return 0; 1734b90ca8baSWill Deacon } 1735b90ca8baSWill Deacon static inline void release_user_cpus_ptr(struct task_struct *p) 1736b90ca8baSWill Deacon { 1737b90ca8baSWill Deacon WARN_ON(p->user_cpus_ptr); 1738b90ca8baSWill Deacon } 1739234b8ab6SWill Deacon 1740234b8ab6SWill Deacon static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) 1741234b8ab6SWill Deacon { 1742234b8ab6SWill Deacon return 0; 1743234b8ab6SWill Deacon } 17441da177e4SLinus Torvalds #endif 1745e0ad9556SRusty Russell 1746fa93384fSDan Carpenter extern int yield_to(struct task_struct *p, bool preempt); 174736c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice); 174836c8b586SIngo Molnar extern int task_prio(const struct task_struct *p); 17495eca1c10SIngo Molnar 1750d0ea0268SDongsheng Yang /** 1751d0ea0268SDongsheng Yang * task_nice - return the nice value of a given task. 1752d0ea0268SDongsheng Yang * @p: the task in question. 1753d0ea0268SDongsheng Yang * 1754d0ea0268SDongsheng Yang * Return: The nice value [ -20 ... 0 ... 19 ]. 
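 *
 * For example, a task still at the default static priority has
 * task_nice(p) == PRIO_TO_NICE(DEFAULT_PRIO) == PRIO_TO_NICE(120) == 0.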
1755d0ea0268SDongsheng Yang */ 1756d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p) 1757d0ea0268SDongsheng Yang { 1758d0ea0268SDongsheng Yang return PRIO_TO_NICE((p)->static_prio); 1759d0ea0268SDongsheng Yang } 17605eca1c10SIngo Molnar 176136c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice); 176236c8b586SIngo Molnar extern int task_curr(const struct task_struct *p); 17631da177e4SLinus Torvalds extern int idle_cpu(int cpu); 1764943d355dSRohit Jain extern int available_idle_cpu(int cpu); 17655eca1c10SIngo Molnar extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); 17665eca1c10SIngo Molnar extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); 17678b700983SPeter Zijlstra extern void sched_set_fifo(struct task_struct *p); 17688b700983SPeter Zijlstra extern void sched_set_fifo_low(struct task_struct *p); 17698b700983SPeter Zijlstra extern void sched_set_normal(struct task_struct *p, int nice); 17705eca1c10SIngo Molnar extern int sched_setattr(struct task_struct *, const struct sched_attr *); 1771794a56ebSJuri Lelli extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); 177236c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu); 17735eca1c10SIngo Molnar 1774c4f30608SPaul E. McKenney /** 1775c4f30608SPaul E. McKenney * is_idle_task - is the specified task an idle task? 1776fa757281SRandy Dunlap * @p: the task in question. 1777e69f6186SYacine Belkadi * 1778e69f6186SYacine Belkadi * Return: 1 if @p is an idle task. 0 otherwise. 1779c4f30608SPaul E. McKenney */ 1780c94a88f3SMarco Elver static __always_inline bool is_idle_task(const struct task_struct *p) 1781c4f30608SPaul E. McKenney { 1782c1de45caSPeter Zijlstra return !!(p->flags & PF_IDLE); 1783c4f30608SPaul E. 
McKenney } 17845eca1c10SIngo Molnar 178536c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu); 1786a458ae2eSPeter Zijlstra extern void ia64_set_curr_task(int cpu, struct task_struct *p); 17871da177e4SLinus Torvalds 17881da177e4SLinus Torvalds void yield(void); 17891da177e4SLinus Torvalds 17901da177e4SLinus Torvalds union thread_union { 17910500871fSDavid Howells #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK 17920500871fSDavid Howells struct task_struct task; 17930500871fSDavid Howells #endif 1794c65eacbeSAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK 17951da177e4SLinus Torvalds struct thread_info thread_info; 1796c65eacbeSAndy Lutomirski #endif 17971da177e4SLinus Torvalds unsigned long stack[THREAD_SIZE/sizeof(long)]; 17981da177e4SLinus Torvalds }; 17991da177e4SLinus Torvalds 18000500871fSDavid Howells #ifndef CONFIG_THREAD_INFO_IN_TASK 18010500871fSDavid Howells extern struct thread_info init_thread_info; 18020500871fSDavid Howells #endif 18030500871fSDavid Howells 18040500871fSDavid Howells extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; 18050500871fSDavid Howells 1806f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK 1807f3ac6067SIngo Molnar static inline struct thread_info *task_thread_info(struct task_struct *task) 1808f3ac6067SIngo Molnar { 1809f3ac6067SIngo Molnar return &task->thread_info; 1810f3ac6067SIngo Molnar } 1811f3ac6067SIngo Molnar #elif !defined(__HAVE_THREAD_FUNCTIONS) 1812f3ac6067SIngo Molnar # define task_thread_info(task) ((struct thread_info *)(task)->stack) 1813f3ac6067SIngo Molnar #endif 1814f3ac6067SIngo Molnar 1815198fe21bSPavel Emelyanov /* 1816198fe21bSPavel Emelyanov * find a task by one of its numerical ids 1817198fe21bSPavel Emelyanov * 1818198fe21bSPavel Emelyanov * find_task_by_pid_ns(): 1819198fe21bSPavel Emelyanov * finds a task by its pid in the specified namespace 1820228ebcbeSPavel Emelyanov * find_task_by_vpid(): 1821228ebcbeSPavel Emelyanov * finds a task by its virtual pid 1822198fe21bSPavel Emelyanov * 1823e49859e7SPavel Emelyanov * see also find_vpid() etc in include/linux/pid.h 1824198fe21bSPavel Emelyanov */ 1825198fe21bSPavel Emelyanov 1826228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr); 18275eca1c10SIngo Molnar extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); 1828198fe21bSPavel Emelyanov 18292ee08260SMike Rapoport /* 18302ee08260SMike Rapoport * find a task by its virtual pid and get the task struct 18312ee08260SMike Rapoport */ 18322ee08260SMike Rapoport extern struct task_struct *find_get_task_by_vpid(pid_t nr); 18332ee08260SMike Rapoport 1834b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state); 1835b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk); 18363e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk); 18375eca1c10SIngo Molnar 18381da177e4SLinus Torvalds #ifdef CONFIG_SMP 18391da177e4SLinus Torvalds extern void kick_process(struct task_struct *tsk); 18401da177e4SLinus Torvalds #else 18411da177e4SLinus Torvalds static inline void kick_process(struct task_struct *tsk) { } 18421da177e4SLinus Torvalds #endif 18431da177e4SLinus Torvalds 184482b89778SAdrian Hunter extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); 18455eca1c10SIngo Molnar 184682b89778SAdrian Hunter static inline void set_task_comm(struct task_struct *tsk, const char *from) 184782b89778SAdrian Hunter { 184882b89778SAdrian Hunter __set_task_comm(tsk, 
from, false); 184982b89778SAdrian Hunter } 18505eca1c10SIngo Molnar 18513756f640SArnd Bergmann extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); 18523756f640SArnd Bergmann #define get_task_comm(buf, tsk) ({ \ 18533756f640SArnd Bergmann BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ 18543756f640SArnd Bergmann __get_task_comm(buf, sizeof(buf), tsk); \ 18553756f640SArnd Bergmann }) 18561da177e4SLinus Torvalds 18571da177e4SLinus Torvalds #ifdef CONFIG_SMP 18582a0a24ebSThomas Gleixner static __always_inline void scheduler_ipi(void) 18592a0a24ebSThomas Gleixner { 18602a0a24ebSThomas Gleixner /* 18612a0a24ebSThomas Gleixner * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting 18622a0a24ebSThomas Gleixner * TIF_NEED_RESCHED remotely (for the first time) will also send 18632a0a24ebSThomas Gleixner * this IPI. 18642a0a24ebSThomas Gleixner */ 18652a0a24ebSThomas Gleixner preempt_fold_need_resched(); 18662a0a24ebSThomas Gleixner } 18672f064a59SPeter Zijlstra extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state); 18681da177e4SLinus Torvalds #else 1869184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { } 18702f064a59SPeter Zijlstra static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) 187185ba2d86SRoland McGrath { 187285ba2d86SRoland McGrath return 1; 187385ba2d86SRoland McGrath } 18741da177e4SLinus Torvalds #endif 18751da177e4SLinus Torvalds 18765eca1c10SIngo Molnar /* 18775eca1c10SIngo Molnar * Set thread flags in other task's structures. 18785eca1c10SIngo Molnar * See asm/thread_info.h for TIF_xxxx flags available: 18791da177e4SLinus Torvalds */ 18801da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) 18811da177e4SLinus Torvalds { 1882a1261f54SAl Viro set_ti_thread_flag(task_thread_info(tsk), flag); 18831da177e4SLinus Torvalds } 18841da177e4SLinus Torvalds 18851da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) 18861da177e4SLinus Torvalds { 1887a1261f54SAl Viro clear_ti_thread_flag(task_thread_info(tsk), flag); 18881da177e4SLinus Torvalds } 18891da177e4SLinus Torvalds 189093ee37c2SDave Martin static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, 189193ee37c2SDave Martin bool value) 189293ee37c2SDave Martin { 189393ee37c2SDave Martin update_ti_thread_flag(task_thread_info(tsk), flag, value); 189493ee37c2SDave Martin } 189593ee37c2SDave Martin 18961da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) 18971da177e4SLinus Torvalds { 1898a1261f54SAl Viro return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); 18991da177e4SLinus Torvalds } 19001da177e4SLinus Torvalds 19011da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) 19021da177e4SLinus Torvalds { 1903a1261f54SAl Viro return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); 19041da177e4SLinus Torvalds } 19051da177e4SLinus Torvalds 19061da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) 19071da177e4SLinus Torvalds { 1908a1261f54SAl Viro return test_ti_thread_flag(task_thread_info(tsk), flag); 19091da177e4SLinus Torvalds } 19101da177e4SLinus Torvalds 19111da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk) 19121da177e4SLinus Torvalds { 19131da177e4SLinus Torvalds set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 
19141da177e4SLinus Torvalds } 19151da177e4SLinus Torvalds 19161da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk) 19171da177e4SLinus Torvalds { 19181da177e4SLinus Torvalds clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); 19191da177e4SLinus Torvalds } 19201da177e4SLinus Torvalds 19218ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk) 19228ae121acSGregory Haskins { 19238ae121acSGregory Haskins return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); 19248ae121acSGregory Haskins } 19258ae121acSGregory Haskins 19261da177e4SLinus Torvalds /* 19271da177e4SLinus Torvalds * cond_resched() and cond_resched_lock(): latency reduction via 19281da177e4SLinus Torvalds * explicit rescheduling in places that are safe. The return 19291da177e4SLinus Torvalds * value indicates whether a reschedule was done in fact. 19301da177e4SLinus Torvalds * cond_resched_lock() will drop the spinlock before scheduling and reacquire it afterwards. 19311da177e4SLinus Torvalds */ 1932b965f1ddSPeter Zijlstra (Intel) #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) 1933b965f1ddSPeter Zijlstra (Intel) extern int __cond_resched(void); 1934b965f1ddSPeter Zijlstra (Intel) 1935b965f1ddSPeter Zijlstra (Intel) #ifdef CONFIG_PREEMPT_DYNAMIC 1936b965f1ddSPeter Zijlstra (Intel) 1937b965f1ddSPeter Zijlstra (Intel) DECLARE_STATIC_CALL(cond_resched, __cond_resched); 1938b965f1ddSPeter Zijlstra (Intel) 1939b965f1ddSPeter Zijlstra (Intel) static __always_inline int _cond_resched(void) 1940b965f1ddSPeter Zijlstra (Intel) { 1941ef72661eSPeter Zijlstra return static_call_mod(cond_resched)(); 1942b965f1ddSPeter Zijlstra (Intel) } 1943b965f1ddSPeter Zijlstra (Intel) 194435a773a0SPeter Zijlstra #else 1945b965f1ddSPeter Zijlstra (Intel) 1946b965f1ddSPeter Zijlstra (Intel) static inline int _cond_resched(void) 1947b965f1ddSPeter Zijlstra (Intel) { 1948b965f1ddSPeter Zijlstra (Intel) return __cond_resched(); 1949b965f1ddSPeter Zijlstra (Intel) } 1950b965f1ddSPeter Zijlstra (Intel) 1951b965f1ddSPeter Zijlstra (Intel) #endif /* CONFIG_PREEMPT_DYNAMIC */ 1952b965f1ddSPeter Zijlstra (Intel) 1953b965f1ddSPeter Zijlstra (Intel) #else 1954b965f1ddSPeter Zijlstra (Intel) 195535a773a0SPeter Zijlstra static inline int _cond_resched(void) { return 0; } 1956b965f1ddSPeter Zijlstra (Intel) 1957b965f1ddSPeter Zijlstra (Intel) #endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */ 19586f80bd98SFrederic Weisbecker 1959613afbf8SFrederic Weisbecker #define cond_resched() ({ \ 19603427445aSPeter Zijlstra ___might_sleep(__FILE__, __LINE__, 0); \ 1961613afbf8SFrederic Weisbecker _cond_resched(); \ 1962613afbf8SFrederic Weisbecker }) 19636f80bd98SFrederic Weisbecker 1964613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock); 1965f3d4b4b1SBen Gardon extern int __cond_resched_rwlock_read(rwlock_t *lock); 1966f3d4b4b1SBen Gardon extern int __cond_resched_rwlock_write(rwlock_t *lock); 1967613afbf8SFrederic Weisbecker 1968613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({ \ 19693427445aSPeter Zijlstra ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ 1970613afbf8SFrederic Weisbecker __cond_resched_lock(lock); \ 1971613afbf8SFrederic Weisbecker }) 1972613afbf8SFrederic Weisbecker 1973f3d4b4b1SBen Gardon #define cond_resched_rwlock_read(lock) ({ \ 1974f3d4b4b1SBen Gardon __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ 1975f3d4b4b1SBen Gardon __cond_resched_rwlock_read(lock); \ 1976f3d4b4b1SBen Gardon }) 1977f3d4b4b1SBen Gardon
1978f3d4b4b1SBen Gardon #define cond_resched_rwlock_write(lock) ({ \ 1979f3d4b4b1SBen Gardon __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ 1980f3d4b4b1SBen Gardon __cond_resched_rwlock_write(lock); \ 1981f3d4b4b1SBen Gardon }) 1982f3d4b4b1SBen Gardon 1983f6f3c437SSimon Horman static inline void cond_resched_rcu(void) 1984f6f3c437SSimon Horman { 1985f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) 1986f6f3c437SSimon Horman rcu_read_unlock(); 1987f6f3c437SSimon Horman cond_resched(); 1988f6f3c437SSimon Horman rcu_read_lock(); 1989f6f3c437SSimon Horman #endif 1990f6f3c437SSimon Horman } 1991f6f3c437SSimon Horman 19921da177e4SLinus Torvalds /* 19931da177e4SLinus Torvalds * Does a critical section need to be broken due to another 1994c1a280b6SThomas Gleixner * task waiting?: (technically does not depend on CONFIG_PREEMPTION, 199595c354feSNick Piggin * but a general need for low latency) 19961da177e4SLinus Torvalds */ 199795c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock) 19981da177e4SLinus Torvalds { 1999c1a280b6SThomas Gleixner #ifdef CONFIG_PREEMPTION 200095c354feSNick Piggin return spin_is_contended(lock); 200195c354feSNick Piggin #else 20021da177e4SLinus Torvalds return 0; 200395c354feSNick Piggin #endif 20041da177e4SLinus Torvalds } 20051da177e4SLinus Torvalds 2006a09a689aSBen Gardon /* 2007a09a689aSBen Gardon * Check if a rwlock is contended. 2008a09a689aSBen Gardon * Returns non-zero if there is another task waiting on the rwlock. 2009a09a689aSBen Gardon * Returns zero if the lock is not contended or the system / underlying 2010a09a689aSBen Gardon * rwlock implementation does not support contention detection. 2011a09a689aSBen Gardon * Technically does not depend on CONFIG_PREEMPTION, but a general need 2012a09a689aSBen Gardon * for low latency. 2013a09a689aSBen Gardon */ 2014a09a689aSBen Gardon static inline int rwlock_needbreak(rwlock_t *lock) 2015a09a689aSBen Gardon { 2016a09a689aSBen Gardon #ifdef CONFIG_PREEMPTION 2017a09a689aSBen Gardon return rwlock_is_contended(lock); 2018a09a689aSBen Gardon #else 2019a09a689aSBen Gardon return 0; 2020a09a689aSBen Gardon #endif 2021a09a689aSBen Gardon } 2022a09a689aSBen Gardon 202375f93fedSPeter Zijlstra static __always_inline bool need_resched(void) 202475f93fedSPeter Zijlstra { 202575f93fedSPeter Zijlstra return unlikely(tif_need_resched()); 202675f93fedSPeter Zijlstra } 202775f93fedSPeter Zijlstra 2028ee761f62SThomas Gleixner /* 20291da177e4SLinus Torvalds * Wrappers for p->thread_info->cpu access. No-op on UP. 
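 *
 * task_cpu() is inherently racy for a remote task: the task can migrate
 * right after the load. The READ_ONCE() below only keeps that load
 * tear-free; callers must tolerate a stale result.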
20301da177e4SLinus Torvalds */ 20311da177e4SLinus Torvalds #ifdef CONFIG_SMP 20321da177e4SLinus Torvalds 20331da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p) 20341da177e4SLinus Torvalds { 2035c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK 2036c546951dSAndrea Parri return READ_ONCE(p->cpu); 2037c65eacbeSAndy Lutomirski #else 2038c546951dSAndrea Parri return READ_ONCE(task_thread_info(p)->cpu); 2039c65eacbeSAndy Lutomirski #endif 20401da177e4SLinus Torvalds } 20411da177e4SLinus Torvalds 2042c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu); 20431da177e4SLinus Torvalds 20441da177e4SLinus Torvalds #else 20451da177e4SLinus Torvalds 20461da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p) 20471da177e4SLinus Torvalds { 20481da177e4SLinus Torvalds return 0; 20491da177e4SLinus Torvalds } 20501da177e4SLinus Torvalds 20511da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) 20521da177e4SLinus Torvalds { 20531da177e4SLinus Torvalds } 20541da177e4SLinus Torvalds 20551da177e4SLinus Torvalds #endif /* CONFIG_SMP */ 20561da177e4SLinus Torvalds 2057d9345c65SPan Xinhui /* 2058d9345c65SPan Xinhui * In order to reduce various lock holder preemption latencies provide an 2059d9345c65SPan Xinhui * interface to see if a vCPU is currently running or not. 2060d9345c65SPan Xinhui * 2061d9345c65SPan Xinhui * This allows us to terminate optimistic spin loops and block, analogous to 2062d9345c65SPan Xinhui * the native optimistic spin heuristic of testing if the lock owner task is 2063d9345c65SPan Xinhui * running or not. 2064d9345c65SPan Xinhui */ 2065d9345c65SPan Xinhui #ifndef vcpu_is_preempted 206642fd8baaSQian Cai static inline bool vcpu_is_preempted(int cpu) 206742fd8baaSQian Cai { 206842fd8baaSQian Cai return false; 206942fd8baaSQian Cai } 2070d9345c65SPan Xinhui #endif 2071d9345c65SPan Xinhui 207296f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); 207396f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask); 20745c45bf27SSiddha, Suresh B 207582455257SDave Hansen #ifndef TASK_SIZE_OF 207682455257SDave Hansen #define TASK_SIZE_OF(tsk) TASK_SIZE 207782455257SDave Hansen #endif 207882455257SDave Hansen 2079a5418be9SViresh Kumar #ifdef CONFIG_SMP 2080a5418be9SViresh Kumar /* Returns effective CPU energy utilization, as seen by the scheduler */ 2081a5418be9SViresh Kumar unsigned long sched_cpu_util(int cpu, unsigned long max); 2082a5418be9SViresh Kumar #endif /* CONFIG_SMP */ 2083a5418be9SViresh Kumar 2084d7822b1eSMathieu Desnoyers #ifdef CONFIG_RSEQ 2085d7822b1eSMathieu Desnoyers 2086d7822b1eSMathieu Desnoyers /* 2087d7822b1eSMathieu Desnoyers * Map the event mask on the user-space ABI enum rseq_cs_flags 2088d7822b1eSMathieu Desnoyers * for direct mask checks. 
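 *
 * Keeping these bit numbers identical to the RSEQ_CS_FLAG_NO_RESTART_ON_*
 * bits is what allows the direct check: the task's rseq_event_mask can be
 * ANDed straight against the user-supplied flags word.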
2089d7822b1eSMathieu Desnoyers */ 2090d7822b1eSMathieu Desnoyers enum rseq_event_mask_bits { 2091d7822b1eSMathieu Desnoyers RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, 2092d7822b1eSMathieu Desnoyers RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, 2093d7822b1eSMathieu Desnoyers RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, 2094d7822b1eSMathieu Desnoyers }; 2095d7822b1eSMathieu Desnoyers 2096d7822b1eSMathieu Desnoyers enum rseq_event_mask { 2097d7822b1eSMathieu Desnoyers RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), 2098d7822b1eSMathieu Desnoyers RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), 2099d7822b1eSMathieu Desnoyers RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), 2100d7822b1eSMathieu Desnoyers }; 2101d7822b1eSMathieu Desnoyers 2102d7822b1eSMathieu Desnoyers static inline void rseq_set_notify_resume(struct task_struct *t) 2103d7822b1eSMathieu Desnoyers { 2104d7822b1eSMathieu Desnoyers if (t->rseq) 2105d7822b1eSMathieu Desnoyers set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); 2106d7822b1eSMathieu Desnoyers } 2107d7822b1eSMathieu Desnoyers 2108784e0300SWill Deacon void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); 2109d7822b1eSMathieu Desnoyers 2110784e0300SWill Deacon static inline void rseq_handle_notify_resume(struct ksignal *ksig, 2111784e0300SWill Deacon struct pt_regs *regs) 2112d7822b1eSMathieu Desnoyers { 2113d7822b1eSMathieu Desnoyers if (current->rseq) 2114784e0300SWill Deacon __rseq_handle_notify_resume(ksig, regs); 2115d7822b1eSMathieu Desnoyers } 2116d7822b1eSMathieu Desnoyers 2117784e0300SWill Deacon static inline void rseq_signal_deliver(struct ksignal *ksig, 2118784e0300SWill Deacon struct pt_regs *regs) 2119d7822b1eSMathieu Desnoyers { 2120d7822b1eSMathieu Desnoyers preempt_disable(); 2121d7822b1eSMathieu Desnoyers __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask); 2122d7822b1eSMathieu Desnoyers preempt_enable(); 2123784e0300SWill Deacon rseq_handle_notify_resume(ksig, regs); 2124d7822b1eSMathieu Desnoyers } 2125d7822b1eSMathieu Desnoyers 2126d7822b1eSMathieu Desnoyers /* rseq_preempt() requires preemption to be disabled. */ 2127d7822b1eSMathieu Desnoyers static inline void rseq_preempt(struct task_struct *t) 2128d7822b1eSMathieu Desnoyers { 2129d7822b1eSMathieu Desnoyers __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); 2130d7822b1eSMathieu Desnoyers rseq_set_notify_resume(t); 2131d7822b1eSMathieu Desnoyers } 2132d7822b1eSMathieu Desnoyers 2133d7822b1eSMathieu Desnoyers /* rseq_migrate() requires preemption to be disabled. */ 2134d7822b1eSMathieu Desnoyers static inline void rseq_migrate(struct task_struct *t) 2135d7822b1eSMathieu Desnoyers { 2136d7822b1eSMathieu Desnoyers __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); 2137d7822b1eSMathieu Desnoyers rseq_set_notify_resume(t); 2138d7822b1eSMathieu Desnoyers } 2139d7822b1eSMathieu Desnoyers 2140d7822b1eSMathieu Desnoyers /* 2141d7822b1eSMathieu Desnoyers * If parent process has a registered restartable sequences area, the 2142463f550fSMathieu Desnoyers * child inherits. Unregister rseq for a clone with CLONE_VM set. 
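 * (With CLONE_VM the child shares the parent's address space, so keeping
 * the registration would leave both tasks updating the same struct rseq
 * area; such a child, typically a new thread, must register its own.)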
2140d7822b1eSMathieu Desnoyers /*
2141d7822b1eSMathieu Desnoyers  * If the parent process has a registered restartable sequences area, the
2142463f550fSMathieu Desnoyers  * child inherits it. Unregister rseq for a clone with CLONE_VM set.
2143d7822b1eSMathieu Desnoyers  */
2144d7822b1eSMathieu Desnoyers static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2145d7822b1eSMathieu Desnoyers {
2146463f550fSMathieu Desnoyers 	if (clone_flags & CLONE_VM) {
2147d7822b1eSMathieu Desnoyers 		t->rseq = NULL;
2148d7822b1eSMathieu Desnoyers 		t->rseq_sig = 0;
2149d7822b1eSMathieu Desnoyers 		t->rseq_event_mask = 0;
2150d7822b1eSMathieu Desnoyers 	} else {
2151d7822b1eSMathieu Desnoyers 		t->rseq = current->rseq;
2152d7822b1eSMathieu Desnoyers 		t->rseq_sig = current->rseq_sig;
2153d7822b1eSMathieu Desnoyers 		t->rseq_event_mask = current->rseq_event_mask;
2154d7822b1eSMathieu Desnoyers 	}
2155d7822b1eSMathieu Desnoyers }
2156d7822b1eSMathieu Desnoyers 
2157d7822b1eSMathieu Desnoyers static inline void rseq_execve(struct task_struct *t)
2158d7822b1eSMathieu Desnoyers {
2159d7822b1eSMathieu Desnoyers 	t->rseq = NULL;
2160d7822b1eSMathieu Desnoyers 	t->rseq_sig = 0;
2161d7822b1eSMathieu Desnoyers 	t->rseq_event_mask = 0;
2162d7822b1eSMathieu Desnoyers }
2163d7822b1eSMathieu Desnoyers 
2164d7822b1eSMathieu Desnoyers #else
2165d7822b1eSMathieu Desnoyers 
2166d7822b1eSMathieu Desnoyers static inline void rseq_set_notify_resume(struct task_struct *t)
2167d7822b1eSMathieu Desnoyers {
2168d7822b1eSMathieu Desnoyers }
2169784e0300SWill Deacon static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2170784e0300SWill Deacon 					     struct pt_regs *regs)
2171d7822b1eSMathieu Desnoyers {
2172d7822b1eSMathieu Desnoyers }
2173784e0300SWill Deacon static inline void rseq_signal_deliver(struct ksignal *ksig,
2174784e0300SWill Deacon 				       struct pt_regs *regs)
2175d7822b1eSMathieu Desnoyers {
2176d7822b1eSMathieu Desnoyers }
2177d7822b1eSMathieu Desnoyers static inline void rseq_preempt(struct task_struct *t)
2178d7822b1eSMathieu Desnoyers {
2179d7822b1eSMathieu Desnoyers }
2180d7822b1eSMathieu Desnoyers static inline void rseq_migrate(struct task_struct *t)
2181d7822b1eSMathieu Desnoyers {
2182d7822b1eSMathieu Desnoyers }
2183d7822b1eSMathieu Desnoyers static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2184d7822b1eSMathieu Desnoyers {
2185d7822b1eSMathieu Desnoyers }
2186d7822b1eSMathieu Desnoyers static inline void rseq_execve(struct task_struct *t)
2187d7822b1eSMathieu Desnoyers {
2188d7822b1eSMathieu Desnoyers }
2189d7822b1eSMathieu Desnoyers 
2190d7822b1eSMathieu Desnoyers #endif
2191d7822b1eSMathieu Desnoyers 
2192d7822b1eSMathieu Desnoyers #ifdef CONFIG_DEBUG_RSEQ
2193d7822b1eSMathieu Desnoyers 
2194d7822b1eSMathieu Desnoyers void rseq_syscall(struct pt_regs *regs);
2195d7822b1eSMathieu Desnoyers 
2196d7822b1eSMathieu Desnoyers #else
2197d7822b1eSMathieu Desnoyers 
2198d7822b1eSMathieu Desnoyers static inline void rseq_syscall(struct pt_regs *regs)
2199d7822b1eSMathieu Desnoyers {
2200d7822b1eSMathieu Desnoyers }
2201d7822b1eSMathieu Desnoyers 
2202d7822b1eSMathieu Desnoyers #endif
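
/*
 * For context on the rseq hooks above: t->rseq and t->rseq_sig are
 * populated when user space registers an rseq area via the rseq(2)
 * system call. Illustrative userspace sketch (EXAMPLE_RSEQ_SIG is an
 * arbitrary signature chosen by the application, which the kernel later
 * verifies against t->rseq_sig on abort):
 *
 *	#include <linux/rseq.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#define EXAMPLE_RSEQ_SIG	0x53053053
 *
 *	static __thread struct rseq example_rseq
 *				__attribute__((aligned(32)));
 *
 *	static int example_register_rseq(void)
 *	{
 *		return syscall(__NR_rseq, &example_rseq, sizeof(example_rseq),
 *			       0, EXAMPLE_RSEQ_SIG);
 *	}
 */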
2203d7822b1eSMathieu Desnoyers 
22043c93a0c0SQais Yousef const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
22053c93a0c0SQais Yousef char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
22063c93a0c0SQais Yousef int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
22073c93a0c0SQais Yousef 
22083c93a0c0SQais Yousef const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
22093c93a0c0SQais Yousef const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
22103c93a0c0SQais Yousef const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
22113c93a0c0SQais Yousef 
22123c93a0c0SQais Yousef int sched_trace_rq_cpu(struct rq *rq);
221351cf18c9SVincent Donnefort int sched_trace_rq_cpu_capacity(struct rq *rq);
22149d246053SPhil Auld int sched_trace_rq_nr_running(struct rq *rq);
22153c93a0c0SQais Yousef 
22163c93a0c0SQais Yousef const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
22173c93a0c0SQais Yousef 
22186e33cad0SPeter Zijlstra #ifdef CONFIG_SCHED_CORE
22196e33cad0SPeter Zijlstra extern void sched_core_free(struct task_struct *tsk);
222085dd3f61SPeter Zijlstra extern void sched_core_fork(struct task_struct *p);
22217ac592aaSChris Hyser extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
22227ac592aaSChris Hyser 				unsigned long uaddr);
22236e33cad0SPeter Zijlstra #else
22246e33cad0SPeter Zijlstra static inline void sched_core_free(struct task_struct *tsk) { }
222585dd3f61SPeter Zijlstra static inline void sched_core_fork(struct task_struct *p) { }
22266e33cad0SPeter Zijlstra #endif
22276e33cad0SPeter Zijlstra 
22281da177e4SLinus Torvalds #endif
2229
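/*
 * For context on the sched_trace_*() accessors above: they let external
 * tracing code (for example a module attached to the scheduler's PELT
 * tracepoints) read scheduler state without the private cfs_rq/rq struct
 * definitions. Illustrative (hypothetical) probe:
 *
 *	static void example_pelt_cfs_probe(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);
 *		char path[64];
 *
 *		sched_trace_cfs_rq_path(cfs_rq, path, sizeof(path));
 *		if (avg)
 *			pr_info("cfs_rq %s: util_avg=%lu\n", path, avg->util_avg);
 *	}
 */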