/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/psi_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/rseq.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED			0x0040
#define TASK_DEAD			0x0080
#define TASK_WAKEKILL			0x0100
#define TASK_WAKING			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					 (task->flags & PF_FROZEN) == 0 && \
					 (task->state & TASK_NOLOAD) == 0)
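
/*
 * For example, the composed states above are plain ORs of the base bits:
 *
 *	TASK_KILLABLE == TASK_WAKEKILL | TASK_UNINTERRUPTIBLE == 0x0102
 *	TASK_IDLE     == TASK_UNINTERRUPTIBLE | TASK_NOLOAD   == 0x0402
 *
 * so a TASK_IDLE sleep is uninterruptible but, per
 * task_contributes_to_load(), does not count towards the load average.
 */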

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state)				\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value)			\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)

#define set_current_state(state_value)				\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->task_state_change = _THIS_IP_;			\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state() executes a full memory barrier before accessing the
 * task state.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
 * a problem either because that will result in one extra go around the loop
 * and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)				\
	current->state = (state_value)

#define set_current_state(state_value)					\
	smp_store_mb(current->state, (state_value))

/*
 * set_special_state() should be used for those states when the blocking task
 * cannot use the regular condition-based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

#endif
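
/*
 * A sketch of a set_special_state() user (details are illustrative): a task
 * that is about to die cannot take another trip around a wait-loop, so
 * do_task_dead() in kernel/sched/core.c does, roughly:
 *
 *	set_special_state(TASK_DEAD);
 *	current->flags |= PF_NOFREEZE;
 *	__schedule(false);		(never returns)
 */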

/* Task command name length: */
#define TASK_COMM_LEN			16

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
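
/*
 * Typical usage, as a minimal sketch: set the task state first, then call
 * schedule_timeout(); e.g. to sleep interruptibly for about one second:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * A return value of 0 means the full timeout elapsed; non-zero means the
 * task was woken early with that many jiffies left. Passing
 * MAX_SCHEDULE_TIMEOUT sleeps with no timeout at all, and the
 * schedule_timeout_*() variants above simply set the corresponding task
 * state before sleeping.
 */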

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};
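
/*
 * A sketch of how the snapshot provides that guarantee (cf. cputime_adjust()
 * in kernel/sched/cputime.c; the exact steps are illustrative): under @lock
 * the freshly computed values are clamped against the stored ones, roughly
 *
 *	stime = max(stime, prev->stime);
 *	utime = max(utime, prev->utime);
 *	prev->stime = stime;
 *	prev->utime = utime;
 *
 * so the pair reported to userspace never moves backwards.
 */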

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64				utime;
	u64				stime;
	unsigned long long		sum_exec_runtime;
};

/* Alternate field names when used on cache expirations: */
#define virt_exp			utime
#define prof_exp			stime
#define sched_exp			sum_exec_runtime

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	u64			utime;
	u64			stime;
	u64			gtime;
};

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};
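
/*
 * These counters back /proc/<pid>/schedstat, which prints the task's
 * cumulative runtime, its run_delay and its pcount; for example
 * (illustrative numbers):
 *
 *	$ cat /proc/self/schedstat
 *	12458221 809240 34
 */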

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
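
/*
 * Worked example: with SCHED_FIXEDPOINT_SHIFT == 10 the unit value is
 * SCHED_FIXEDPOINT_SCALE == 1024, so a ratio of 50% is stored as 512 and
 * 25% as 256. Multiplying two such ratios needs one corrective shift:
 *
 *	product = (a * b) >> SCHED_FIXEDPOINT_SHIFT;
 *
 * e.g. (512 * 256) >> 10 == 128, i.e. 12.5%.
 */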

struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

/**
 * struct util_est - Estimated utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 *            utilization of a task
 *
 * Support data structure to track an Exponential Weighted Moving Average
 * (EWMA) of a FAIR task's utilization. New samples are added to the moving
 * average each time a task completes an activation. The sample weight is
 * chosen so that the EWMA will be relatively insensitive to transient changes
 * to the task's workload.
 *
 * The enqueued attribute has a slightly different meaning for tasks and cpus:
 * - task:   the task's util_avg at last task dequeue time
 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
 * Thus, the util_est.enqueued of a task represents the contribution to the
 * estimated utilization of the CPU where that task is currently enqueued.
 *
 * Only for tasks do we track a moving average of the past instantaneous
 * estimated utilization. This allows us to absorb sporadic drops in the
 * utilization of an otherwise almost periodic task.
 */
struct util_est {
	unsigned int			enqueued;
	unsigned int			ewma;
#define UTIL_EST_WEIGHT_SHIFT		2
} __attribute__((__aligned__(sizeof(u64))));
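
/*
 * A sketch of the EWMA update itself (performed at dequeue time in
 * kernel/sched/fair.c; treat the exact steps as illustrative): with
 * UTIL_EST_WEIGHT_SHIFT == 2, each new sample contributes 1/4 of its
 * difference from the running average:
 *
 *	diff = enqueued - ewma;
 *	ewma = ((ewma << UTIL_EST_WEIGHT_SHIFT) + diff) >> UTIL_EST_WEIGHT_SHIFT;
 *
 * i.e. ewma += diff / 4, which is what makes the average relatively
 * insensitive to transient changes, as described above.
 */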

/*
 * The load_avg/util_avg metrics accumulate an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *    Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u64				runnable_load_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			runnable_load_avg;
	unsigned long			util_avg;
	struct util_est			util_est;
} ____cacheline_aligned;
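
/*
 * A worked illustration of the definitions above: a task running 25% of
 * the time on a CPU at full capacity and frequency settles at
 *
 *	util_avg ~= 0.25 * SCHED_CAPACITY_SCALE = 256
 *
 * while a nice-0 task (weight 1024 after scale_load_down()) that is
 * runnable 100% of the time settles at load_avg ~= 1024.
 */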

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	unsigned long			runnable_weight;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

	struct sched_statistics		statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period		*/
	u64				dl_density;	/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to deadline inheritance
	 * (DI). If so we are outside the bandwidth enforcement mechanism
	 * (but only until we exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */
	unsigned int			dl_throttled      : 1;
	unsigned int			dl_boosted        : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;
};
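
/*
 * The three original parameters above map directly onto sched_attr; a
 * minimal userspace sketch (values illustrative, in nanoseconds) asking
 * for 10ms of runtime every 100ms:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *	sched_setattr(0, &attr, 0);
 *
 * giving dl_bw == dl_runtime / dl_period == 10%.
 */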

union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_hint; /* Hint for performance. */
		u8			pad; /* No garbage from compiler! */
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void				*stack;
	refcount_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	struct llist_node		wake_entry;
	int				on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int			cpu;
#endif
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	/*
	 * recent_used_cpu is initially set as the last CPU used by a task
	 * that wakes affine another task. Waker/wakee relationships can
	 * push tasks around a CPU where each wakeup moves to the next one.
	 * Tracking a recently used CPU allows a quick search for a recently
	 * used CPU that may be idle.
	 */
	int				recent_used_cpu;
	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
	struct sched_dl_entity		dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	int				nr_cpus_allowed;
	cpumask_t			cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	/* Per-thread vma caching: */
	struct vmacache			vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat		rss_stat;
#endif
	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
	unsigned			sched_remote_wakeup:1;
#ifdef CONFIG_PSI
	unsigned			sched_psi_wake_requeue:1;
#endif

	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	/* to be used once the psi infrastructure lands upstream. */
	unsigned			use_memdelay:1;
#endif

	/*
	 * May usercopy functions fault on kernel addresses?
	 * This is not just a single bit because this can potentially nest.
	 */
	unsigned int			kernel_uaccess_faults_ok;

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid			*thread_pid;
	struct hlist_node		pid_links[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				real_start_time;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime		cputime_expires;
	struct list_head		cpu_timers[3];
#endif

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized by setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long			last_switch_count;
	unsigned long			last_switch_time;
#endif
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct		*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

	struct audit_context		*audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t				loginuid;
	unsigned int			sessionid;
#endif
	struct seccomp			seccomp;

	/* Thread group tracking: */
	u32				parent_exec_id;
	u32				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached		pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int			irq_events;
	unsigned long			hardirq_enable_ip;
	unsigned long			hardirq_disable_ip;
	unsigned int			hardirq_enable_event;
	unsigned int			hardirq_disable_event;
	int				hardirqs_enabled;
	int				hardirq_context;
	unsigned long			softirq_disable_ip;
	unsigned long			softirq_enable_ip;
	unsigned int			softirq_disable_event;
	unsigned int			softirq_enable_event;
	int				softirqs_enabled;
	int				softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_UBSAN
	unsigned int			in_ubsan;
#endif

	/* Journalling filesystem info: */
	void				*journal_info;

	/* Stacked block device info: */
	struct bio_list			*bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug			*plug;
#endif

	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;

	struct io_context		*io_context;

	/* Ptrace state: */
	unsigned long			ptrace_message;
	kernel_siginfo_t		*last_siginfo;

	struct task_io_accounting	ioac;
#ifdef CONFIG_PSI
	/* Pressure stall state */
	unsigned int			psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64				acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64				acct_vm_mem1;
	/* stime + utime since last update: */
	u64				acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t			mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu		*cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head		cg_list;
#endif
#ifdef CONFIG_X86_RESCTRL
	u32				closid;
	u32				rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user	*robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head		pi_state_list;
	struct futex_pi_state		*pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
	struct mutex			perf_event_mutex;
	struct list_head		perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long			preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy		*mempolicy;
	short				il_prev;
	short				pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int				numa_scan_seq;
	unsigned int			numa_scan_period;
	unsigned int			numa_scan_period_max;
	int				numa_preferred_nid;
	unsigned long			numa_migrate_retry;
	/* Migration stamp: */
	u64				node_stamp;
	u64				last_task_numa_placement;
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;

	struct numa_group		*numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long			*numa_faults;
	unsigned long			total_numa_faults;
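
	/*
	 * A sketch of the indexing into numa_faults (cf. task_faults_idx()
	 * in kernel/sched/fair.c; treat the exact expression as
	 * illustrative):
	 *
	 *	idx = NR_NUMA_HINT_FAULT_TYPES * (region * nr_node_ids + nid) + priv;
	 *
	 * where region selects one of the four regions described above and
	 * priv distinguishes private from shared faults.
	 */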

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with
	 * different weights depending on whether they were shared or
	 * private faults.
	 */
1062074c2381SMel Gorman 	unsigned long			numa_faults_locality[3];
106304bb2f94SRik van Riel 
1064b32e86b4SIngo Molnar 	unsigned long			numa_pages_migrated;
1065cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
1066cbee9f88SPeter Zijlstra 
1067d7822b1eSMathieu Desnoyers #ifdef CONFIG_RSEQ
1068d7822b1eSMathieu Desnoyers 	struct rseq __user *rseq;
1069d7822b1eSMathieu Desnoyers 	u32 rseq_len;
1070d7822b1eSMathieu Desnoyers 	u32 rseq_sig;
1071d7822b1eSMathieu Desnoyers 	/*
1072d7822b1eSMathieu Desnoyers 	 * RmW on rseq_event_mask must be performed atomically
1073d7822b1eSMathieu Desnoyers 	 * with respect to preemption.
1074d7822b1eSMathieu Desnoyers 	 */
1075d7822b1eSMathieu Desnoyers 	unsigned long rseq_event_mask;
1076d7822b1eSMathieu Desnoyers #endif
1077d7822b1eSMathieu Desnoyers 
107872b252aeSMel Gorman 	struct tlbflush_unmap_batch	tlb_ubc;
107972b252aeSMel Gorman 
1080e56d0903SIngo Molnar 	struct rcu_head			rcu;
1081b92ce558SJens Axboe 
10825eca1c10SIngo Molnar 	/* Cache last used pipe for splice(): */
1083b92ce558SJens Axboe 	struct pipe_inode_info		*splice_pipe;
10845640f768SEric Dumazet 
10855640f768SEric Dumazet 	struct page_frag		task_frag;
10865640f768SEric Dumazet 
1087ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT
1088ca74e92bSShailabh Nagar 	struct task_delay_info		*delays;
1089ca74e92bSShailabh Nagar #endif
109047913d4eSIngo Molnar 
1091f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1092f4f154fdSAkinobu Mita 	int				make_it_fail;
10939049f2f6SAkinobu Mita 	unsigned int			fail_nth;
1094f4f154fdSAkinobu Mita #endif
10959d823e8fSWu Fengguang 	/*
10965eca1c10SIngo Molnar 	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
10975eca1c10SIngo Molnar 	 * balance_dirty_pages() for a dirty throttling pause:
10989d823e8fSWu Fengguang 	 */
10999d823e8fSWu Fengguang 	int				nr_dirtied;
11009d823e8fSWu Fengguang 	int				nr_dirtied_pause;
11015eca1c10SIngo Molnar 	/* Start of a write-and-pause period: */
11025eca1c10SIngo Molnar 	unsigned long			dirty_paused_when;
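
	/*
	 * Illustrative sketch of the consumer side: a write path checks
	 * these counters roughly as balance_dirty_pages_ratelimited() in
	 * mm/page-writeback.c does (simplified):
	 *
	 *	if (unlikely(current->nr_dirtied >= current->nr_dirtied_pause))
	 *		balance_dirty_pages(...);
	 *
	 * with the real call site deriving its limit from nr_dirtied_pause
	 * and per-CPU ratelimits.
	 */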
11039d823e8fSWu Fengguang 
11049745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
11059745512cSArjan van de Ven 	int				latency_record_count;
11069745512cSArjan van de Ven 	struct latency_record		latency_record[LT_SAVECOUNT];
11079745512cSArjan van de Ven #endif
11086976675dSArjan van de Ven 	/*
11095eca1c10SIngo Molnar 	 * Time slack values; these are used to round up poll() and
11106976675dSArjan van de Ven 	 * select() etc. timeout values. These are in nanoseconds.
11116976675dSArjan van de Ven 	 */
1112da8b44d5SJohn Stultz 	u64				timer_slack_ns;
1113da8b44d5SJohn Stultz 	u64				default_timer_slack_ns;
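
	/*
	 * Illustrative sketch: fs/select.c's select_estimate_accuracy()
	 * never returns an accuracy below current->timer_slack_ns, and the
	 * resulting slack is applied when arming the timeout, e.g.:
	 *
	 *	hrtimer_start_range_ns(timer, expires, slack, mode);
	 *
	 * so expiry may land anywhere in [expires, expires + slack].
	 */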
1114f8d570a4SDavid Miller 
11150b24beccSAndrey Ryabinin #ifdef CONFIG_KASAN
11160b24beccSAndrey Ryabinin 	unsigned int			kasan_depth;
11170b24beccSAndrey Ryabinin #endif
11185eca1c10SIngo Molnar 
1119fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11205eca1c10SIngo Molnar 	/* Index of current stored address in ret_stack: */
1121f201ae23SFrederic Weisbecker 	int				curr_ret_stack;
112239eb456dSSteven Rostedt (VMware) 	int				curr_ret_depth;
11235eca1c10SIngo Molnar 
11245eca1c10SIngo Molnar 	/* Stack of return addresses for return function tracing: */
1125f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack		*ret_stack;
11265eca1c10SIngo Molnar 
11275eca1c10SIngo Molnar 	/* Timestamp for last schedule: */
11288aef2d28SSteven Rostedt 	unsigned long long		ftrace_timestamp;
11295eca1c10SIngo Molnar 
1130f201ae23SFrederic Weisbecker 	/*
1131f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
11325eca1c10SIngo Molnar 	 * because of depth overrun:
1133f201ae23SFrederic Weisbecker 	 */
1134f201ae23SFrederic Weisbecker 	atomic_t			trace_overrun;
11355eca1c10SIngo Molnar 
11365eca1c10SIngo Molnar 	/* Pause tracing: */
1137380c4b14SFrederic Weisbecker 	atomic_t			tracing_graph_pause;
1138f201ae23SFrederic Weisbecker #endif
11395eca1c10SIngo Molnar 
1140ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
11415eca1c10SIngo Molnar 	/* State flags for use by tracers: */
1142ea4e2bc4SSteven Rostedt 	unsigned long			trace;
11435eca1c10SIngo Molnar 
11445eca1c10SIngo Molnar 	/* Bitmask and counter of trace recursion: */
1145261842b7SSteven Rostedt 	unsigned long			trace_recursion;
1146261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
11475eca1c10SIngo Molnar 
11485c9a8750SDmitry Vyukov #ifdef CONFIG_KCOV
11495eca1c10SIngo Molnar 	/* Coverage collection mode enabled for this task (0 if disabled): */
11500ed557aaSMark Rutland 	unsigned int			kcov_mode;
11515eca1c10SIngo Molnar 
11525eca1c10SIngo Molnar 	/* Size of the kcov_area: */
11535eca1c10SIngo Molnar 	unsigned int			kcov_size;
11545eca1c10SIngo Molnar 
11555eca1c10SIngo Molnar 	/* Buffer for coverage collection: */
11565c9a8750SDmitry Vyukov 	void				*kcov_area;
11575eca1c10SIngo Molnar 
11585eca1c10SIngo Molnar 	/* KCOV descriptor wired with this task or NULL: */
11595c9a8750SDmitry Vyukov 	struct kcov			*kcov;
11605c9a8750SDmitry Vyukov #endif
11615eca1c10SIngo Molnar 
11626f185c29SVladimir Davydov #ifdef CONFIG_MEMCG
1163626ebc41STejun Heo 	struct mem_cgroup		*memcg_in_oom;
1164626ebc41STejun Heo 	gfp_t				memcg_oom_gfp_mask;
1165626ebc41STejun Heo 	int				memcg_oom_order;
1166b23afb93STejun Heo 
11675eca1c10SIngo Molnar 	/* Number of pages to reclaim on returning to userland: */
1168b23afb93STejun Heo 	unsigned int			memcg_nr_pages_over_high;
1169d46eb14bSShakeel Butt 
1170d46eb14bSShakeel Butt 	/* Used by memcontrol for targeted memcg charge: */
1171d46eb14bSShakeel Butt 	struct mem_cgroup		*active_memcg;
1172569b846dSKAMEZAWA Hiroyuki #endif
11735eca1c10SIngo Molnar 
1174d09d8df3SJosef Bacik #ifdef CONFIG_BLK_CGROUP
1175d09d8df3SJosef Bacik 	struct request_queue		*throttle_queue;
1176d09d8df3SJosef Bacik #endif
1177d09d8df3SJosef Bacik 
11780326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
11790326f5a9SSrikar Dronamraju 	struct uprobe_task		*utask;
11800326f5a9SSrikar Dronamraju #endif
1181cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1182cafe5635SKent Overstreet 	unsigned int			sequential_io;
1183cafe5635SKent Overstreet 	unsigned int			sequential_io_avg;
1184cafe5635SKent Overstreet #endif
11858eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
11868eb23b9fSPeter Zijlstra 	unsigned long			task_state_change;
11878eb23b9fSPeter Zijlstra #endif
11888bcbde54SDavid Hildenbrand 	int				pagefault_disabled;
118903049269SMichal Hocko #ifdef CONFIG_MMU
119029c696e1SVladimir Davydov 	struct task_struct		*oom_reaper_list;
119103049269SMichal Hocko #endif
1192ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK
1193ba14a194SAndy Lutomirski 	struct vm_struct		*stack_vm_area;
1194ba14a194SAndy Lutomirski #endif
119568f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
11965eca1c10SIngo Molnar 	/* A live task holds one reference: */
119768f24b08SAndy Lutomirski 	atomic_t			stack_refcount;
119868f24b08SAndy Lutomirski #endif
1199d83a7cb3SJosh Poimboeuf #ifdef CONFIG_LIVEPATCH
1200d83a7cb3SJosh Poimboeuf 	int patch_state;
1201d83a7cb3SJosh Poimboeuf #endif
1202e4e55b47STetsuo Handa #ifdef CONFIG_SECURITY
1203e4e55b47STetsuo Handa 	/* Used by LSM modules for access restriction: */
1204e4e55b47STetsuo Handa 	void				*security;
1205e4e55b47STetsuo Handa #endif
120629e48ce8SKees Cook 
1207afaef01cSAlexander Popov #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1208afaef01cSAlexander Popov 	unsigned long			lowest_stack;
1209c8d12627SAlexander Popov 	unsigned long			prev_lowest_stack;
1210afaef01cSAlexander Popov #endif
1211afaef01cSAlexander Popov 
121229e48ce8SKees Cook 	/*
121329e48ce8SKees Cook 	 * New fields for task_struct should be added above here, so that
121429e48ce8SKees Cook 	 * they are included in the randomized portion of task_struct.
121529e48ce8SKees Cook 	 */
121629e48ce8SKees Cook 	randomized_struct_fields_end
121729e48ce8SKees Cook 
12185eca1c10SIngo Molnar 	/* CPU-specific state of this task: */
12190c8c0f03SDave Hansen 	struct thread_struct		thread;
12205eca1c10SIngo Molnar 
12210c8c0f03SDave Hansen 	/*
12220c8c0f03SDave Hansen 	 * WARNING: on x86, 'thread_struct' contains a variable-sized
12230c8c0f03SDave Hansen 	 * structure.  It *MUST* be at the end of 'task_struct'.
12240c8c0f03SDave Hansen 	 *
12250c8c0f03SDave Hansen 	 * Do not put anything below here!
12260c8c0f03SDave Hansen 	 */
12271da177e4SLinus Torvalds };
12281da177e4SLinus Torvalds 
1229e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
123022c935f4SEric W. Biederman {
12312c470475SEric W. Biederman 	return task->thread_pid;
123222c935f4SEric W. Biederman }
123322c935f4SEric W. Biederman 
12347af57294SPavel Emelyanov /*
12357af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
12367af57294SPavel Emelyanov  * from various namespaces
12377af57294SPavel Emelyanov  *
12387af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
123944c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
124044c4e1b2SEric W. Biederman  *                     current.
12417af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
12427af57294SPavel Emelyanov  *
12437af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
12447af57294SPavel Emelyanov  */
12455eca1c10SIngo Molnar pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
12467af57294SPavel Emelyanov 
1247e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
12487af57294SPavel Emelyanov {
12497af57294SPavel Emelyanov 	return tsk->pid;
12507af57294SPavel Emelyanov }
12517af57294SPavel Emelyanov 
12525eca1c10SIngo Molnar static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
125352ee2dfdSOleg Nesterov {
125452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
125552ee2dfdSOleg Nesterov }
12567af57294SPavel Emelyanov 
12577af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
12587af57294SPavel Emelyanov {
125952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
12607af57294SPavel Emelyanov }
12617af57294SPavel Emelyanov 
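/*
 * Example (minimal sketch): report a pid both globally and as seen from
 * the task's own pid namespace:
 *
 *	pr_info("pid=%d vpid=%d\n", task_pid_nr(tsk), task_pid_vnr(tsk));
 */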
12627af57294SPavel Emelyanov 
1263e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
12647af57294SPavel Emelyanov {
12657af57294SPavel Emelyanov 	return tsk->tgid;
12667af57294SPavel Emelyanov }
12677af57294SPavel Emelyanov 
12685eca1c10SIngo Molnar /**
12695eca1c10SIngo Molnar  * pid_alive - check that a task structure is not stale
12705eca1c10SIngo Molnar  * @p: Task structure to be checked.
12715eca1c10SIngo Molnar  *
12725eca1c10SIngo Molnar  * Test if a process is not yet dead (at most in zombie state).
12735eca1c10SIngo Molnar  * If pid_alive fails, then pointers within the task structure
12745eca1c10SIngo Molnar  * can be stale and must not be dereferenced.
12755eca1c10SIngo Molnar  *
12765eca1c10SIngo Molnar  * Return: 1 if the process is alive. 0 otherwise.
12775eca1c10SIngo Molnar  */
12785eca1c10SIngo Molnar static inline int pid_alive(const struct task_struct *p)
12795eca1c10SIngo Molnar {
12802c470475SEric W. Biederman 	return p->thread_pid != NULL;
12815eca1c10SIngo Molnar }
12827af57294SPavel Emelyanov 
12835eca1c10SIngo Molnar static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
12847af57294SPavel Emelyanov {
128552ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
12867af57294SPavel Emelyanov }
12877af57294SPavel Emelyanov 
12887af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
12897af57294SPavel Emelyanov {
129052ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
12917af57294SPavel Emelyanov }
12927af57294SPavel Emelyanov 
12937af57294SPavel Emelyanov 
12945eca1c10SIngo Molnar static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
12957af57294SPavel Emelyanov {
129652ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
12977af57294SPavel Emelyanov }
12987af57294SPavel Emelyanov 
12997af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
13007af57294SPavel Emelyanov {
130152ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
13027af57294SPavel Emelyanov }
13037af57294SPavel Emelyanov 
1304dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1305dd1c1f2fSOleg Nesterov {
13066883f81aSEric W. Biederman 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1307dd1c1f2fSOleg Nesterov }
1308dd1c1f2fSOleg Nesterov 
1309dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1310dd1c1f2fSOleg Nesterov {
13116883f81aSEric W. Biederman 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1312dd1c1f2fSOleg Nesterov }
1313dd1c1f2fSOleg Nesterov 
1314dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1315dd1c1f2fSOleg Nesterov {
1316dd1c1f2fSOleg Nesterov 	pid_t pid = 0;
1317dd1c1f2fSOleg Nesterov 
1318dd1c1f2fSOleg Nesterov 	rcu_read_lock();
1319dd1c1f2fSOleg Nesterov 	if (pid_alive(tsk))
1320dd1c1f2fSOleg Nesterov 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1321dd1c1f2fSOleg Nesterov 	rcu_read_unlock();
1322dd1c1f2fSOleg Nesterov 
1323dd1c1f2fSOleg Nesterov 	return pid;
1324dd1c1f2fSOleg Nesterov }
1325dd1c1f2fSOleg Nesterov 
1326dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1327dd1c1f2fSOleg Nesterov {
1328dd1c1f2fSOleg Nesterov 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1329dd1c1f2fSOleg Nesterov }
1330dd1c1f2fSOleg Nesterov 
13315eca1c10SIngo Molnar /* Obsolete, do not use: */
13321b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
13331b0f7ffdSOleg Nesterov {
13341b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
13351b0f7ffdSOleg Nesterov }
13367af57294SPavel Emelyanov 
133706eb6184SPeter Zijlstra #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
133806eb6184SPeter Zijlstra #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
133906eb6184SPeter Zijlstra 
13401d48b080SPeter Zijlstra static inline unsigned int task_state_index(struct task_struct *tsk)
134120435d84SXie XiuQi {
13421593baabSPeter Zijlstra 	unsigned int tsk_state = READ_ONCE(tsk->state);
13431593baabSPeter Zijlstra 	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
134420435d84SXie XiuQi 
134506eb6184SPeter Zijlstra 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
134606eb6184SPeter Zijlstra 
134706eb6184SPeter Zijlstra 	if (tsk_state == TASK_IDLE)
134806eb6184SPeter Zijlstra 		state = TASK_REPORT_IDLE;
134906eb6184SPeter Zijlstra 
13501593baabSPeter Zijlstra 	return fls(state);
13511593baabSPeter Zijlstra }
135220435d84SXie XiuQi 
13531d48b080SPeter Zijlstra static inline char task_index_to_char(unsigned int state)
13541593baabSPeter Zijlstra {
13558ef9925bSPeter Zijlstra 	static const char state_char[] = "RSDTtXZPI";
13561593baabSPeter Zijlstra 
135706eb6184SPeter Zijlstra 	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
13581593baabSPeter Zijlstra 
13591593baabSPeter Zijlstra 	return state_char[state];
13601593baabSPeter Zijlstra }
13611593baabSPeter Zijlstra 
13621593baabSPeter Zijlstra static inline char task_state_to_char(struct task_struct *tsk)
13631593baabSPeter Zijlstra {
13641d48b080SPeter Zijlstra 	return task_index_to_char(task_state_index(tsk));
136520435d84SXie XiuQi }
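
/*
 * Example (sketch): fs/proc derives its one-letter task state from these
 * helpers; a running task yields 'R', an interruptible sleeper 'S':
 *
 *	seq_printf(m, "State:\t%c\n", task_state_to_char(tsk));
 */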
136620435d84SXie XiuQi 
13671da177e4SLinus Torvalds /**
1368570f5241SSergey Senozhatsky  * is_global_init - check if a task structure is init. Since init
1369570f5241SSergey Senozhatsky  * is free to have sub-threads, we need to check tgid.
13703260259fSHenne  * @tsk: Task structure to be checked.
13713260259fSHenne  *
13723260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1373e69f6186SYacine Belkadi  *
1374e69f6186SYacine Belkadi  * Return: 1 if the task structure is init. 0 otherwise.
1375f400e198SSukadev Bhattiprolu  */
1376e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1377b461cc03SPavel Emelyanov {
1378570f5241SSergey Senozhatsky 	return task_tgid_nr(tsk) == 1;
1379b461cc03SPavel Emelyanov }
1380b460cbc5SSerge E. Hallyn 
13819ec52099SCedric Le Goater extern struct pid *cad_pid;
13829ec52099SCedric Le Goater 
13831da177e4SLinus Torvalds /*
13841da177e4SLinus Torvalds  * Per process flags
13851da177e4SLinus Torvalds  */
1386c1de45caSPeter Zijlstra #define PF_IDLE			0x00000002	/* I am an IDLE thread */
13875eca1c10SIngo Molnar #define PF_EXITING		0x00000004	/* Getting shut down */
13885eca1c10SIngo Molnar #define PF_EXITPIDONE		0x00000008	/* PI exit done on shut down */
138994886b84SLaurent Vivier #define PF_VCPU			0x00000010	/* I'm a virtual CPU */
139021aa9af0STejun Heo #define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
13915eca1c10SIngo Molnar #define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
13925eca1c10SIngo Molnar #define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
13935eca1c10SIngo Molnar #define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
13945eca1c10SIngo Molnar #define PF_DUMPCORE		0x00000200	/* Dumped core */
13955eca1c10SIngo Molnar #define PF_SIGNALED		0x00000400	/* Killed by a signal */
13961da177e4SLinus Torvalds #define PF_MEMALLOC		0x00000800	/* Allocating memory */
13975eca1c10SIngo Molnar #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
13985eca1c10SIngo Molnar #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
13995eca1c10SIngo Molnar #define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
14005eca1c10SIngo Molnar #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
14015eca1c10SIngo Molnar #define PF_FROZEN		0x00010000	/* Frozen for system suspend */
14027dea19f9SMichal Hocko #define PF_KSWAPD		0x00020000	/* I am kswapd */
14037dea19f9SMichal Hocko #define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
14047dea19f9SMichal Hocko #define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
14051da177e4SLinus Torvalds #define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
1406246bb0b1SOleg Nesterov #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
14075eca1c10SIngo Molnar #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
1408b31dc66aSJens Axboe #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
1409eb414681SJohannes Weiner #define PF_MEMSTALL		0x01000000	/* Stalled due to lack of memory */
141073ab1cb2STaehee Yoo #define PF_UMH			0x02000000	/* I'm a usermodehelper process */
141114a40ffcSTejun Heo #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
14124db96cf0SAndi Kleen #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
141358a69cb4STejun Heo #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
14145eca1c10SIngo Molnar #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
14151da177e4SLinus Torvalds 
14161da177e4SLinus Torvalds /*
14171da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
14181da177e4SLinus Torvalds  * tasks can access tsk->flags in readonly mode, for example
14191da177e4SLinus Torvalds  * with tsk_used_math() (as during threaded core dumping).
14201da177e4SLinus Torvalds  * There is, however, an exception to this rule during ptrace
14211da177e4SLinus Torvalds  * or fork: the ptracer task is allowed to write to the
14221da177e4SLinus Torvalds  * child->flags of its traced child (the same goes for fork, where the
14231da177e4SLinus Torvalds  * parent can write to child->flags), because we're guaranteed the
14241da177e4SLinus Torvalds  * child is not running and therefore not changing child->flags
14251da177e4SLinus Torvalds  * at the same time the parent does it.
14261da177e4SLinus Torvalds  */
14271da177e4SLinus Torvalds #define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
14281da177e4SLinus Torvalds #define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
14291da177e4SLinus Torvalds #define clear_used_math()			clear_stopped_child_used_math(current)
14301da177e4SLinus Torvalds #define set_used_math()				set_stopped_child_used_math(current)
14315eca1c10SIngo Molnar 
14321da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
14331da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
14345eca1c10SIngo Molnar 
14355eca1c10SIngo Molnar #define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)
14365eca1c10SIngo Molnar 
14371da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
14381da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
14395eca1c10SIngo Molnar 
14401da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
14411da177e4SLinus Torvalds #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
14421da177e4SLinus Torvalds #define used_math()				tsk_used_math(current)
14431da177e4SLinus Torvalds 
144462ec05ddSThomas Gleixner static inline bool is_percpu_thread(void)
144562ec05ddSThomas Gleixner {
144662ec05ddSThomas Gleixner #ifdef CONFIG_SMP
144762ec05ddSThomas Gleixner 	return (current->flags & PF_NO_SETAFFINITY) &&
144862ec05ddSThomas Gleixner 		(current->nr_cpus_allowed == 1);
144962ec05ddSThomas Gleixner #else
145062ec05ddSThomas Gleixner 	return true;
145162ec05ddSThomas Gleixner #endif
145262ec05ddSThomas Gleixner }
145362ec05ddSThomas Gleixner 
14541d4457f9SKees Cook /* Per-process atomic flags. */
1455a2b86f77SZefan Li #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
14562ad654bcSZefan Li #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
14572ad654bcSZefan Li #define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
1458356e4bffSThomas Gleixner #define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
1459356e4bffSThomas Gleixner #define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled*/
14609137bb27SThomas Gleixner #define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
14619137bb27SThomas Gleixner #define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
14621d4457f9SKees Cook 
1463e0e5070bSZefan Li #define TASK_PFA_TEST(name, func)					\
1464e0e5070bSZefan Li 	static inline bool task_##func(struct task_struct *p)		\
1465e0e5070bSZefan Li 	{ return test_bit(PFA_##name, &p->atomic_flags); }
14665eca1c10SIngo Molnar 
1467e0e5070bSZefan Li #define TASK_PFA_SET(name, func)					\
1468e0e5070bSZefan Li 	static inline void task_set_##func(struct task_struct *p)	\
1469e0e5070bSZefan Li 	{ set_bit(PFA_##name, &p->atomic_flags); }
14705eca1c10SIngo Molnar 
1471e0e5070bSZefan Li #define TASK_PFA_CLEAR(name, func)					\
1472e0e5070bSZefan Li 	static inline void task_clear_##func(struct task_struct *p)	\
1473e0e5070bSZefan Li 	{ clear_bit(PFA_##name, &p->atomic_flags); }
14741d4457f9SKees Cook 
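/*
 * For reference, TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) below expands
 * to:
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * and the SET/CLEAR variants generate the matching set_bit()/clear_bit()
 * wrappers. Note that NO_NEW_PRIVS deliberately has no CLEAR helper:
 * once set, it must not be cleared.
 */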
1475e0e5070bSZefan Li TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1476e0e5070bSZefan Li TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
14771d4457f9SKees Cook 
14782ad654bcSZefan Li TASK_PFA_TEST(SPREAD_PAGE, spread_page)
14792ad654bcSZefan Li TASK_PFA_SET(SPREAD_PAGE, spread_page)
14802ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
14812ad654bcSZefan Li 
14822ad654bcSZefan Li TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
14832ad654bcSZefan Li TASK_PFA_SET(SPREAD_SLAB, spread_slab)
14842ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1485544b2c91STejun Heo 
1486356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1487356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1488356e4bffSThomas Gleixner TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1489356e4bffSThomas Gleixner 
1490356e4bffSThomas Gleixner TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1491356e4bffSThomas Gleixner TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1492356e4bffSThomas Gleixner 
14939137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
14949137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
14959137bb27SThomas Gleixner TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
14969137bb27SThomas Gleixner 
14979137bb27SThomas Gleixner TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
14989137bb27SThomas Gleixner TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
14999137bb27SThomas Gleixner 
15005eca1c10SIngo Molnar static inline void
1501717a94b5SNeilBrown current_restore_flags(unsigned long orig_flags, unsigned long flags)
1502907aed48SMel Gorman {
1503717a94b5SNeilBrown 	current->flags &= ~flags;
1504717a94b5SNeilBrown 	current->flags |= orig_flags & flags;
1505907aed48SMel Gorman }
1506907aed48SMel Gorman 
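/*
 * Typical save/restore pattern (sketch; cf. memalloc_noio_save() and
 * memalloc_noio_restore() in <linux/sched/mm.h>):
 *
 *	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	...
 *	current_restore_flags(flags, PF_MEMALLOC_NOIO);
 *
 * Only the PF_MEMALLOC_NOIO bit is restored to its saved state; all
 * other flags are left untouched.
 */
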
15075eca1c10SIngo Molnar extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
15085eca1c10SIngo Molnar extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
15091da177e4SLinus Torvalds #ifdef CONFIG_SMP
15105eca1c10SIngo Molnar extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
15115eca1c10SIngo Molnar extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
15121da177e4SLinus Torvalds #else
15135eca1c10SIngo Molnar static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
15141e1b6c51SKOSAKI Motohiro {
15151e1b6c51SKOSAKI Motohiro }
15165eca1c10SIngo Molnar static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
15171da177e4SLinus Torvalds {
151896f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
15191da177e4SLinus Torvalds 		return -EINVAL;
15201da177e4SLinus Torvalds 	return 0;
15211da177e4SLinus Torvalds }
15221da177e4SLinus Torvalds #endif
1523e0ad9556SRusty Russell 
15246d0d2878SChristian Borntraeger #ifndef cpu_relax_yield
15256d0d2878SChristian Borntraeger #define cpu_relax_yield() cpu_relax()
15266d0d2878SChristian Borntraeger #endif
15276d0d2878SChristian Borntraeger 
1528fa93384fSDan Carpenter extern int yield_to(struct task_struct *p, bool preempt);
152936c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
153036c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
15315eca1c10SIngo Molnar 
1532d0ea0268SDongsheng Yang /**
1533d0ea0268SDongsheng Yang  * task_nice - return the nice value of a given task.
1534d0ea0268SDongsheng Yang  * @p: the task in question.
1535d0ea0268SDongsheng Yang  *
1536d0ea0268SDongsheng Yang  * Return: The nice value [ -20 ... 0 ... 19 ].
1537d0ea0268SDongsheng Yang  */
1538d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p)
1539d0ea0268SDongsheng Yang {
1540d0ea0268SDongsheng Yang 	return PRIO_TO_NICE((p)->static_prio);
1541d0ea0268SDongsheng Yang }
15425eca1c10SIngo Molnar 
154336c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
154436c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
15451da177e4SLinus Torvalds extern int idle_cpu(int cpu);
1546943d355dSRohit Jain extern int available_idle_cpu(int cpu);
15475eca1c10SIngo Molnar extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
15485eca1c10SIngo Molnar extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
15495eca1c10SIngo Molnar extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1550794a56ebSJuri Lelli extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
155136c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
15525eca1c10SIngo Molnar 
1553c4f30608SPaul E. McKenney /**
1554c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
1555fa757281SRandy Dunlap  * @p: the task in question.
1556e69f6186SYacine Belkadi  *
1557e69f6186SYacine Belkadi  * Return: 1 if @p is an idle task. 0 otherwise.
1558c4f30608SPaul E. McKenney  */
15597061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
1560c4f30608SPaul E. McKenney {
1561c1de45caSPeter Zijlstra 	return !!(p->flags & PF_IDLE);
1562c4f30608SPaul E. McKenney }
15635eca1c10SIngo Molnar 
156436c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
1565a458ae2eSPeter Zijlstra extern void ia64_set_curr_task(int cpu, struct task_struct *p);
15661da177e4SLinus Torvalds 
15671da177e4SLinus Torvalds void yield(void);
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds union thread_union {
15700500871fSDavid Howells #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
15710500871fSDavid Howells 	struct task_struct task;
15720500871fSDavid Howells #endif
1573c65eacbeSAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK
15741da177e4SLinus Torvalds 	struct thread_info thread_info;
1575c65eacbeSAndy Lutomirski #endif
15761da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
15771da177e4SLinus Torvalds };
15781da177e4SLinus Torvalds 
15790500871fSDavid Howells #ifndef CONFIG_THREAD_INFO_IN_TASK
15800500871fSDavid Howells extern struct thread_info init_thread_info;
15810500871fSDavid Howells #endif
15820500871fSDavid Howells 
15830500871fSDavid Howells extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
15840500871fSDavid Howells 
1585f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
1586f3ac6067SIngo Molnar static inline struct thread_info *task_thread_info(struct task_struct *task)
1587f3ac6067SIngo Molnar {
1588f3ac6067SIngo Molnar 	return &task->thread_info;
1589f3ac6067SIngo Molnar }
1590f3ac6067SIngo Molnar #elif !defined(__HAVE_THREAD_FUNCTIONS)
1591f3ac6067SIngo Molnar # define task_thread_info(task)	((struct thread_info *)(task)->stack)
1592f3ac6067SIngo Molnar #endif
1593f3ac6067SIngo Molnar 
1594198fe21bSPavel Emelyanov /*
1595198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
1596198fe21bSPavel Emelyanov  *
1597198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
1598198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
1599228ebcbeSPavel Emelyanov  * find_task_by_vpid():
1600228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
1601198fe21bSPavel Emelyanov  *
1602e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
1603198fe21bSPavel Emelyanov  */
1604198fe21bSPavel Emelyanov 
1605228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
16065eca1c10SIngo Molnar extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1607198fe21bSPavel Emelyanov 
16082ee08260SMike Rapoport /*
16092ee08260SMike Rapoport  * find a task by its virtual pid and get the task struct
16102ee08260SMike Rapoport  */
16112ee08260SMike Rapoport extern struct task_struct *find_get_task_by_vpid(pid_t nr);
16122ee08260SMike Rapoport 
1613b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1614b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
16153e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
16165eca1c10SIngo Molnar 
16171da177e4SLinus Torvalds #ifdef CONFIG_SMP
16181da177e4SLinus Torvalds extern void kick_process(struct task_struct *tsk);
16191da177e4SLinus Torvalds #else
16201da177e4SLinus Torvalds static inline void kick_process(struct task_struct *tsk) { }
16211da177e4SLinus Torvalds #endif
16221da177e4SLinus Torvalds 
162382b89778SAdrian Hunter extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
16245eca1c10SIngo Molnar 
162582b89778SAdrian Hunter static inline void set_task_comm(struct task_struct *tsk, const char *from)
162682b89778SAdrian Hunter {
162782b89778SAdrian Hunter 	__set_task_comm(tsk, from, false);
162882b89778SAdrian Hunter }
16295eca1c10SIngo Molnar 
16303756f640SArnd Bergmann extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
16313756f640SArnd Bergmann #define get_task_comm(buf, tsk) ({			\
16323756f640SArnd Bergmann 	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
16333756f640SArnd Bergmann 	__get_task_comm(buf, sizeof(buf), tsk);		\
16343756f640SArnd Bergmann })
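
/*
 * Example: the buffer must be exactly TASK_COMM_LEN bytes, or the
 * BUILD_BUG_ON() above fires at compile time:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, current);
 */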
16351da177e4SLinus Torvalds 
16361da177e4SLinus Torvalds #ifdef CONFIG_SMP
1637317f3941SPeter Zijlstra void scheduler_ipi(void);
163885ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
16391da177e4SLinus Torvalds #else
1640184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
16415eca1c10SIngo Molnar static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
164285ba2d86SRoland McGrath {
164385ba2d86SRoland McGrath 	return 1;
164485ba2d86SRoland McGrath }
16451da177e4SLinus Torvalds #endif
16461da177e4SLinus Torvalds 
16475eca1c10SIngo Molnar /*
16485eca1c10SIngo Molnar  * Set thread flags in other task's structures.
16495eca1c10SIngo Molnar  * See asm/thread_info.h for TIF_xxxx flags available:
16501da177e4SLinus Torvalds  */
16511da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
16521da177e4SLinus Torvalds {
1653a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
16541da177e4SLinus Torvalds }
16551da177e4SLinus Torvalds 
16561da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
16571da177e4SLinus Torvalds {
1658a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
16591da177e4SLinus Torvalds }
16601da177e4SLinus Torvalds 
166193ee37c2SDave Martin static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
166293ee37c2SDave Martin 					  bool value)
166393ee37c2SDave Martin {
166493ee37c2SDave Martin 	update_ti_thread_flag(task_thread_info(tsk), flag, value);
166593ee37c2SDave Martin }
166693ee37c2SDave Martin 
16671da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
16681da177e4SLinus Torvalds {
1669a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
16701da177e4SLinus Torvalds }
16711da177e4SLinus Torvalds 
16721da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
16731da177e4SLinus Torvalds {
1674a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
16751da177e4SLinus Torvalds }
16761da177e4SLinus Torvalds 
16771da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
16781da177e4SLinus Torvalds {
1679a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
16801da177e4SLinus Torvalds }
16811da177e4SLinus Torvalds 
16821da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
16831da177e4SLinus Torvalds {
16841da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
16851da177e4SLinus Torvalds }
16861da177e4SLinus Torvalds 
16871da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
16881da177e4SLinus Torvalds {
16891da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
16901da177e4SLinus Torvalds }
16911da177e4SLinus Torvalds 
16928ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
16938ae121acSGregory Haskins {
16948ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
16958ae121acSGregory Haskins }
16968ae121acSGregory Haskins 
16971da177e4SLinus Torvalds /*
16981da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
16991da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
17001da177e4SLinus Torvalds  * value indicates whether a reschedule was in fact done.
17011da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling.
17021da177e4SLinus Torvalds  */
170335a773a0SPeter Zijlstra #ifndef CONFIG_PREEMPT
1704c3921ab7SLinus Torvalds extern int _cond_resched(void);
170535a773a0SPeter Zijlstra #else
170635a773a0SPeter Zijlstra static inline int _cond_resched(void) { return 0; }
170735a773a0SPeter Zijlstra #endif
17086f80bd98SFrederic Weisbecker 
1709613afbf8SFrederic Weisbecker #define cond_resched() ({			\
17103427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, 0);	\
1711613afbf8SFrederic Weisbecker 	_cond_resched();			\
1712613afbf8SFrederic Weisbecker })
17136f80bd98SFrederic Weisbecker 
1714613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
1715613afbf8SFrederic Weisbecker 
1716613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
17173427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1718613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
1719613afbf8SFrederic Weisbecker })
1720613afbf8SFrederic Weisbecker 
1721f6f3c437SSimon Horman static inline void cond_resched_rcu(void)
1722f6f3c437SSimon Horman {
1723f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1724f6f3c437SSimon Horman 	rcu_read_unlock();
1725f6f3c437SSimon Horman 	cond_resched();
1726f6f3c437SSimon Horman 	rcu_read_lock();
1727f6f3c437SSimon Horman #endif
1728f6f3c437SSimon Horman }
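
/*
 * Typical use (sketch): break up a long-running loop so that other tasks
 * can get CPU time on non-preemptible kernels; process(), pos and head
 * are placeholders:
 *
 *	list_for_each_entry(pos, &head, list) {
 *		process(pos);
 *		cond_resched();
 *	}
 */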
1729f6f3c437SSimon Horman 
17301da177e4SLinus Torvalds /*
17311da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
173295c354feSNick Piggin  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
173395c354feSNick Piggin  * but reflects a general need for low latency.)
17341da177e4SLinus Torvalds  */
173595c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
17361da177e4SLinus Torvalds {
173795c354feSNick Piggin #ifdef CONFIG_PREEMPT
173895c354feSNick Piggin 	return spin_is_contended(lock);
173995c354feSNick Piggin #else
17401da177e4SLinus Torvalds 	return 0;
174195c354feSNick Piggin #endif
17421da177e4SLinus Torvalds }
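
/*
 * Simplified sketch of the pattern __cond_resched_lock() implements:
 * drop the lock when another task is spinning on it or a reschedule is
 * pending, then re-take it:
 *
 *	if (spin_needbreak(lock) || need_resched()) {
 *		spin_unlock(lock);
 *		cpu_relax();
 *		spin_lock(lock);
 *	}
 */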
17431da177e4SLinus Torvalds 
174475f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
174575f93fedSPeter Zijlstra {
174675f93fedSPeter Zijlstra 	return unlikely(tif_need_resched());
174775f93fedSPeter Zijlstra }
174875f93fedSPeter Zijlstra 
1749ee761f62SThomas Gleixner /*
17501da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
17511da177e4SLinus Torvalds  */
17521da177e4SLinus Torvalds #ifdef CONFIG_SMP
17531da177e4SLinus Torvalds 
17541da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
17551da177e4SLinus Torvalds {
1756c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
1757c65eacbeSAndy Lutomirski 	return p->cpu;
1758c65eacbeSAndy Lutomirski #else
1759a1261f54SAl Viro 	return task_thread_info(p)->cpu;
1760c65eacbeSAndy Lutomirski #endif
17611da177e4SLinus Torvalds }
17621da177e4SLinus Torvalds 
1763c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
17641da177e4SLinus Torvalds 
17651da177e4SLinus Torvalds #else
17661da177e4SLinus Torvalds 
17671da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
17681da177e4SLinus Torvalds {
17691da177e4SLinus Torvalds 	return 0;
17701da177e4SLinus Torvalds }
17711da177e4SLinus Torvalds 
17721da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
17731da177e4SLinus Torvalds {
17741da177e4SLinus Torvalds }
17751da177e4SLinus Torvalds 
17761da177e4SLinus Torvalds #endif /* CONFIG_SMP */
17771da177e4SLinus Torvalds 
1778d9345c65SPan Xinhui /*
1779d9345c65SPan Xinhui  * In order to reduce various lock holder preemption latencies, provide an
1780d9345c65SPan Xinhui  * interface to see if a vCPU is currently running or not.
1781d9345c65SPan Xinhui  *
1782d9345c65SPan Xinhui  * This allows us to terminate optimistic spin loops and block, analogous to
1783d9345c65SPan Xinhui  * the native optimistic spin heuristic of testing if the lock owner task is
1784d9345c65SPan Xinhui  * running or not.
1785d9345c65SPan Xinhui  */
1786d9345c65SPan Xinhui #ifndef vcpu_is_preempted
1787d9345c65SPan Xinhui # define vcpu_is_preempted(cpu)	false
1788d9345c65SPan Xinhui #endif
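
/*
 * Sketch of the heuristic described above, in the spirit of the mutex
 * owner-spinning code: stop busy-waiting once the owner's vCPU has been
 * preempted, since it cannot make progress; lock_held_by() is a
 * placeholder:
 *
 *	while (lock_held_by(lock, owner)) {
 *		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *			break;
 *		cpu_relax();
 *	}
 */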
1789d9345c65SPan Xinhui 
179096f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
179196f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
17925c45bf27SSiddha, Suresh B 
179382455257SDave Hansen #ifndef TASK_SIZE_OF
179482455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
179582455257SDave Hansen #endif
179682455257SDave Hansen 
1797d7822b1eSMathieu Desnoyers #ifdef CONFIG_RSEQ
1798d7822b1eSMathieu Desnoyers 
1799d7822b1eSMathieu Desnoyers /*
1800d7822b1eSMathieu Desnoyers  * Map the event mask on the user-space ABI enum rseq_cs_flags
1801d7822b1eSMathieu Desnoyers  * for direct mask checks.
1802d7822b1eSMathieu Desnoyers  */
1803d7822b1eSMathieu Desnoyers enum rseq_event_mask_bits {
1804d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
1805d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
1806d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
1807d7822b1eSMathieu Desnoyers };
1808d7822b1eSMathieu Desnoyers 
1809d7822b1eSMathieu Desnoyers enum rseq_event_mask {
1810d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
1811d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
1812d7822b1eSMathieu Desnoyers 	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
1813d7822b1eSMathieu Desnoyers };
1814d7822b1eSMathieu Desnoyers 
1815d7822b1eSMathieu Desnoyers static inline void rseq_set_notify_resume(struct task_struct *t)
1816d7822b1eSMathieu Desnoyers {
1817d7822b1eSMathieu Desnoyers 	if (t->rseq)
1818d7822b1eSMathieu Desnoyers 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1819d7822b1eSMathieu Desnoyers }
1820d7822b1eSMathieu Desnoyers 
1821784e0300SWill Deacon void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
1822d7822b1eSMathieu Desnoyers 
1823784e0300SWill Deacon static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1824784e0300SWill Deacon 					     struct pt_regs *regs)
1825d7822b1eSMathieu Desnoyers {
1826d7822b1eSMathieu Desnoyers 	if (current->rseq)
1827784e0300SWill Deacon 		__rseq_handle_notify_resume(ksig, regs);
1828d7822b1eSMathieu Desnoyers }
1829d7822b1eSMathieu Desnoyers 
1830784e0300SWill Deacon static inline void rseq_signal_deliver(struct ksignal *ksig,
1831784e0300SWill Deacon 				       struct pt_regs *regs)
1832d7822b1eSMathieu Desnoyers {
1833d7822b1eSMathieu Desnoyers 	preempt_disable();
1834d7822b1eSMathieu Desnoyers 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
1835d7822b1eSMathieu Desnoyers 	preempt_enable();
1836784e0300SWill Deacon 	rseq_handle_notify_resume(ksig, regs);
1837d7822b1eSMathieu Desnoyers }
1838d7822b1eSMathieu Desnoyers 
1839d7822b1eSMathieu Desnoyers /* rseq_preempt() requires preemption to be disabled. */
1840d7822b1eSMathieu Desnoyers static inline void rseq_preempt(struct task_struct *t)
1841d7822b1eSMathieu Desnoyers {
1842d7822b1eSMathieu Desnoyers 	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
1843d7822b1eSMathieu Desnoyers 	rseq_set_notify_resume(t);
1844d7822b1eSMathieu Desnoyers }
1845d7822b1eSMathieu Desnoyers 
1846d7822b1eSMathieu Desnoyers /* rseq_migrate() requires preemption to be disabled. */
1847d7822b1eSMathieu Desnoyers static inline void rseq_migrate(struct task_struct *t)
1848d7822b1eSMathieu Desnoyers {
1849d7822b1eSMathieu Desnoyers 	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
1850d7822b1eSMathieu Desnoyers 	rseq_set_notify_resume(t);
1851d7822b1eSMathieu Desnoyers }
1852d7822b1eSMathieu Desnoyers 
1853d7822b1eSMathieu Desnoyers /*
1854d7822b1eSMathieu Desnoyers  * If the parent process has a registered restartable sequences area, the
18559a789fcfSMathieu Desnoyers  * child inherits it. This only applies when forking a process, not a thread.
1856d7822b1eSMathieu Desnoyers  */
1857d7822b1eSMathieu Desnoyers static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
1858d7822b1eSMathieu Desnoyers {
1859d7822b1eSMathieu Desnoyers 	if (clone_flags & CLONE_THREAD) {
1860d7822b1eSMathieu Desnoyers 		t->rseq = NULL;
1861d7822b1eSMathieu Desnoyers 		t->rseq_len = 0;
1862d7822b1eSMathieu Desnoyers 		t->rseq_sig = 0;
1863d7822b1eSMathieu Desnoyers 		t->rseq_event_mask = 0;
1864d7822b1eSMathieu Desnoyers 	} else {
1865d7822b1eSMathieu Desnoyers 		t->rseq = current->rseq;
1866d7822b1eSMathieu Desnoyers 		t->rseq_len = current->rseq_len;
1867d7822b1eSMathieu Desnoyers 		t->rseq_sig = current->rseq_sig;
1868d7822b1eSMathieu Desnoyers 		t->rseq_event_mask = current->rseq_event_mask;
1869d7822b1eSMathieu Desnoyers 	}
1870d7822b1eSMathieu Desnoyers }
1871d7822b1eSMathieu Desnoyers 
1872d7822b1eSMathieu Desnoyers static inline void rseq_execve(struct task_struct *t)
1873d7822b1eSMathieu Desnoyers {
1874d7822b1eSMathieu Desnoyers 	t->rseq = NULL;
1875d7822b1eSMathieu Desnoyers 	t->rseq_len = 0;
1876d7822b1eSMathieu Desnoyers 	t->rseq_sig = 0;
1877d7822b1eSMathieu Desnoyers 	t->rseq_event_mask = 0;
1878d7822b1eSMathieu Desnoyers }
1879d7822b1eSMathieu Desnoyers 
1880d7822b1eSMathieu Desnoyers #else
1881d7822b1eSMathieu Desnoyers 
1882d7822b1eSMathieu Desnoyers static inline void rseq_set_notify_resume(struct task_struct *t)
1883d7822b1eSMathieu Desnoyers {
1884d7822b1eSMathieu Desnoyers }
1885784e0300SWill Deacon static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1886784e0300SWill Deacon 					     struct pt_regs *regs)
1887d7822b1eSMathieu Desnoyers {
1888d7822b1eSMathieu Desnoyers }
1889784e0300SWill Deacon static inline void rseq_signal_deliver(struct ksignal *ksig,
1890784e0300SWill Deacon 				       struct pt_regs *regs)
1891d7822b1eSMathieu Desnoyers {
1892d7822b1eSMathieu Desnoyers }
1893d7822b1eSMathieu Desnoyers static inline void rseq_preempt(struct task_struct *t)
1894d7822b1eSMathieu Desnoyers {
1895d7822b1eSMathieu Desnoyers }
1896d7822b1eSMathieu Desnoyers static inline void rseq_migrate(struct task_struct *t)
1897d7822b1eSMathieu Desnoyers {
1898d7822b1eSMathieu Desnoyers }
1899d7822b1eSMathieu Desnoyers static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
1900d7822b1eSMathieu Desnoyers {
1901d7822b1eSMathieu Desnoyers }
1902d7822b1eSMathieu Desnoyers static inline void rseq_execve(struct task_struct *t)
1903d7822b1eSMathieu Desnoyers {
1904d7822b1eSMathieu Desnoyers }
1905d7822b1eSMathieu Desnoyers 
1906d7822b1eSMathieu Desnoyers #endif
1907d7822b1eSMathieu Desnoyers 
190873ab1cb2STaehee Yoo void __exit_umh(struct task_struct *tsk);
190973ab1cb2STaehee Yoo 
191073ab1cb2STaehee Yoo static inline void exit_umh(struct task_struct *tsk)
191173ab1cb2STaehee Yoo {
191273ab1cb2STaehee Yoo 	if (unlikely(tsk->flags & PF_UMH))
191373ab1cb2STaehee Yoo 		__exit_umh(tsk);
191473ab1cb2STaehee Yoo }
191573ab1cb2STaehee Yoo 
1916d7822b1eSMathieu Desnoyers #ifdef CONFIG_DEBUG_RSEQ
1917d7822b1eSMathieu Desnoyers 
1918d7822b1eSMathieu Desnoyers void rseq_syscall(struct pt_regs *regs);
1919d7822b1eSMathieu Desnoyers 
1920d7822b1eSMathieu Desnoyers #else
1921d7822b1eSMathieu Desnoyers 
1922d7822b1eSMathieu Desnoyers static inline void rseq_syscall(struct pt_regs *regs)
1923d7822b1eSMathieu Desnoyers {
1924d7822b1eSMathieu Desnoyers }
1925d7822b1eSMathieu Desnoyers 
1926d7822b1eSMathieu Desnoyers #endif
1927d7822b1eSMathieu Desnoyers 
19281da177e4SLinus Torvalds #endif
1929