xref: /linux/include/linux/sched.h (revision 1d48b080bcce0a5e7d7aa2dbcdb35deefc188c3f)
11da177e4SLinus Torvalds #ifndef _LINUX_SCHED_H
21da177e4SLinus Torvalds #define _LINUX_SCHED_H
31da177e4SLinus Torvalds 
45eca1c10SIngo Molnar /*
55eca1c10SIngo Molnar  * Define 'struct task_struct' and provide the main scheduler
65eca1c10SIngo Molnar  * APIs (schedule(), wakeup variants, etc.)
75eca1c10SIngo Molnar  */
85eca1c10SIngo Molnar 
9607ca46eSDavid Howells #include <uapi/linux/sched.h>
10b7b3c76aSDavid Woodhouse 
1170b8157eSIngo Molnar #include <asm/current.h>
1270b8157eSIngo Molnar 
135eca1c10SIngo Molnar #include <linux/pid.h>
145eca1c10SIngo Molnar #include <linux/sem.h>
155eca1c10SIngo Molnar #include <linux/shm.h>
165eca1c10SIngo Molnar #include <linux/kcov.h>
175eca1c10SIngo Molnar #include <linux/mutex.h>
185eca1c10SIngo Molnar #include <linux/plist.h>
195eca1c10SIngo Molnar #include <linux/hrtimer.h>
205eca1c10SIngo Molnar #include <linux/seccomp.h>
215eca1c10SIngo Molnar #include <linux/nodemask.h>
225eca1c10SIngo Molnar #include <linux/rcupdate.h>
235eca1c10SIngo Molnar #include <linux/resource.h>
245eca1c10SIngo Molnar #include <linux/latencytop.h>
255eca1c10SIngo Molnar #include <linux/sched/prio.h>
265eca1c10SIngo Molnar #include <linux/signal_types.h>
275eca1c10SIngo Molnar #include <linux/mm_types_task.h>
285eca1c10SIngo Molnar #include <linux/task_io_accounting.h>
295eca1c10SIngo Molnar 
305eca1c10SIngo Molnar /* task_struct member predeclarations (sorted alphabetically): */
31c7af7877SIngo Molnar struct audit_context;
32c7af7877SIngo Molnar struct backing_dev_info;
33c7af7877SIngo Molnar struct bio_list;
34c7af7877SIngo Molnar struct blk_plug;
35c7af7877SIngo Molnar struct cfs_rq;
36c7af7877SIngo Molnar struct fs_struct;
37c7af7877SIngo Molnar struct futex_pi_state;
38c7af7877SIngo Molnar struct io_context;
39c7af7877SIngo Molnar struct mempolicy;
40c7af7877SIngo Molnar struct nameidata;
41c7af7877SIngo Molnar struct nsproxy;
42c7af7877SIngo Molnar struct perf_event_context;
43c7af7877SIngo Molnar struct pid_namespace;
44c7af7877SIngo Molnar struct pipe_inode_info;
45c7af7877SIngo Molnar struct rcu_node;
46c7af7877SIngo Molnar struct reclaim_state;
47c7af7877SIngo Molnar struct robust_list_head;
48e2d1e2aeSIngo Molnar struct sched_attr;
49e2d1e2aeSIngo Molnar struct sched_param;
5043ae34cbSIngo Molnar struct seq_file;
51c7af7877SIngo Molnar struct sighand_struct;
52c7af7877SIngo Molnar struct signal_struct;
53c7af7877SIngo Molnar struct task_delay_info;
544cf86d77SIngo Molnar struct task_group;
551da177e4SLinus Torvalds 
564a8342d2SLinus Torvalds /*
574a8342d2SLinus Torvalds  * Task state bitmask. NOTE! These bits are also
584a8342d2SLinus Torvalds  * encoded in fs/proc/array.c: get_task_state().
594a8342d2SLinus Torvalds  *
604a8342d2SLinus Torvalds  * We have two separate sets of flags: task->state
614a8342d2SLinus Torvalds  * is about runnability, while task->exit_state is
624a8342d2SLinus Torvalds  * about the task exiting. Confusing, but this way
634a8342d2SLinus Torvalds  * modifying one set can't modify the other one by
644a8342d2SLinus Torvalds  * mistake.
654a8342d2SLinus Torvalds  */
665eca1c10SIngo Molnar 
675eca1c10SIngo Molnar /* Used in tsk->state: */
6892c4bc9fSPeter Zijlstra #define TASK_RUNNING			0x0000
6992c4bc9fSPeter Zijlstra #define TASK_INTERRUPTIBLE		0x0001
7092c4bc9fSPeter Zijlstra #define TASK_UNINTERRUPTIBLE		0x0002
7192c4bc9fSPeter Zijlstra #define __TASK_STOPPED			0x0004
7292c4bc9fSPeter Zijlstra #define __TASK_TRACED			0x0008
735eca1c10SIngo Molnar /* Used in tsk->exit_state: */
7492c4bc9fSPeter Zijlstra #define EXIT_DEAD			0x0010
7592c4bc9fSPeter Zijlstra #define EXIT_ZOMBIE			0x0020
76abd50b39SOleg Nesterov #define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
775eca1c10SIngo Molnar /* Used in tsk->state again: */
788ef9925bSPeter Zijlstra #define TASK_PARKED			0x0040
798ef9925bSPeter Zijlstra #define TASK_DEAD			0x0080
808ef9925bSPeter Zijlstra #define TASK_WAKEKILL			0x0100
818ef9925bSPeter Zijlstra #define TASK_WAKING			0x0200
8292c4bc9fSPeter Zijlstra #define TASK_NOLOAD			0x0400
8392c4bc9fSPeter Zijlstra #define TASK_NEW			0x0800
8492c4bc9fSPeter Zijlstra #define TASK_STATE_MAX			0x1000
85f021a3c2SMatthew Wilcox 
865eca1c10SIngo Molnar /* Convenience macros for the sake of set_current_state: */
87f021a3c2SMatthew Wilcox #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
88f021a3c2SMatthew Wilcox #define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
89f021a3c2SMatthew Wilcox #define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)
901da177e4SLinus Torvalds 
9180ed87c8SPeter Zijlstra #define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
9280ed87c8SPeter Zijlstra 
935eca1c10SIngo Molnar /* Convenience macros for the sake of wake_up(): */
9492a1f4bcSMatthew Wilcox #define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
95f021a3c2SMatthew Wilcox #define TASK_ALL			(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
9692a1f4bcSMatthew Wilcox 
975eca1c10SIngo Molnar /* get_task_state(): */
9892a1f4bcSMatthew Wilcox #define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
99f021a3c2SMatthew Wilcox 					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
1008ef9925bSPeter Zijlstra 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
1018ef9925bSPeter Zijlstra 					 TASK_PARKED)
10292a1f4bcSMatthew Wilcox 
103f021a3c2SMatthew Wilcox #define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
1045eca1c10SIngo Molnar 
105f021a3c2SMatthew Wilcox #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
1065eca1c10SIngo Molnar 
1075eca1c10SIngo Molnar #define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
1085eca1c10SIngo Molnar 
1095eca1c10SIngo Molnar #define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
11080ed87c8SPeter Zijlstra 					 (task->flags & PF_FROZEN) == 0 && \
11180ed87c8SPeter Zijlstra 					 (task->state & TASK_NOLOAD) == 0)
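/*
 * Example of how the checks above compose: a task sleeping in TASK_IDLE
 * (TASK_UNINTERRUPTIBLE | TASK_NOLOAD, defined above) passes the
 * TASK_UNINTERRUPTIBLE test but fails the TASK_NOLOAD one, so unlike a
 * plain TASK_UNINTERRUPTIBLE sleeper it does not contribute to the
 * load average.
 */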
1121da177e4SLinus Torvalds 
1138eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1148eb23b9fSPeter Zijlstra 
1158eb23b9fSPeter Zijlstra #define __set_current_state(state_value)			\
1168eb23b9fSPeter Zijlstra 	do {							\
1178eb23b9fSPeter Zijlstra 		current->task_state_change = _THIS_IP_;		\
1188eb23b9fSPeter Zijlstra 		current->state = (state_value);			\
1198eb23b9fSPeter Zijlstra 	} while (0)
1208eb23b9fSPeter Zijlstra #define set_current_state(state_value)				\
1218eb23b9fSPeter Zijlstra 	do {							\
1228eb23b9fSPeter Zijlstra 		current->task_state_change = _THIS_IP_;		\
123b92b8b35SPeter Zijlstra 		smp_store_mb(current->state, (state_value));	\
1248eb23b9fSPeter Zijlstra 	} while (0)
1258eb23b9fSPeter Zijlstra 
1268eb23b9fSPeter Zijlstra #else
127498d0c57SAndrew Morton /*
128498d0c57SAndrew Morton  * set_current_state() includes a barrier so that the write of current->state
129498d0c57SAndrew Morton  * is correctly serialised wrt the caller's subsequent test of whether to
130498d0c57SAndrew Morton  * actually sleep:
131498d0c57SAndrew Morton  *
132a2250238SPeter Zijlstra  *   for (;;) {
133498d0c57SAndrew Morton  *	set_current_state(TASK_UNINTERRUPTIBLE);
134a2250238SPeter Zijlstra  *	if (!need_sleep)
135a2250238SPeter Zijlstra  *		break;
136498d0c57SAndrew Morton  *
137a2250238SPeter Zijlstra  *	schedule();
138a2250238SPeter Zijlstra  *   }
139a2250238SPeter Zijlstra  *   __set_current_state(TASK_RUNNING);
140a2250238SPeter Zijlstra  *
141a2250238SPeter Zijlstra  * If the caller does not need such serialisation (because, for instance, the
142a2250238SPeter Zijlstra  * condition test and condition change and wakeup are under the same lock) then
143a2250238SPeter Zijlstra  * use __set_current_state().
144a2250238SPeter Zijlstra  *
145a2250238SPeter Zijlstra  * The above is typically ordered against the wakeup, which does:
146a2250238SPeter Zijlstra  *
147a2250238SPeter Zijlstra  *	need_sleep = false;
148a2250238SPeter Zijlstra  *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
149a2250238SPeter Zijlstra  *
150a2250238SPeter Zijlstra  * Where wake_up_state() (and all other wakeup primitives) imply enough
151a2250238SPeter Zijlstra  * barriers to order the store of the variable against wakeup.
152a2250238SPeter Zijlstra  *
153a2250238SPeter Zijlstra  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
154a2250238SPeter Zijlstra  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
155a2250238SPeter Zijlstra  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
156a2250238SPeter Zijlstra  *
157a2250238SPeter Zijlstra  * This is obviously fine, since they both store the exact same value.
158a2250238SPeter Zijlstra  *
159a2250238SPeter Zijlstra  * Also see the comments of try_to_wake_up().
160498d0c57SAndrew Morton  */
1615eca1c10SIngo Molnar #define __set_current_state(state_value) do { current->state = (state_value); } while (0)
1625eca1c10SIngo Molnar #define set_current_state(state_value)	 smp_store_mb(current->state, (state_value))
1638eb23b9fSPeter Zijlstra #endif
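/*
 * Illustrative sketch of the wait/wake pattern documented above. This is
 * hypothetical example code, not part of this header: 'my_cond', 'my_waiter',
 * my_wait() and my_wake() are made-up names; the primitives are the real
 * ones described in the comment (set_current_state(), schedule(),
 * wake_up_state()).
 */
#if 0	/* example only, not compiled */
static bool my_cond;
static struct task_struct *my_waiter;

static void my_wait(void)
{
	my_waiter = current;
	for (;;) {
		/* Publish the state before testing the condition: */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (my_cond)
			break;
		schedule();
	}
	/* No serialisation needed here, see the comment above: */
	__set_current_state(TASK_RUNNING);
}

static void my_wake(void)
{
	/* Condition change first, then the wakeup (which implies barriers): */
	my_cond = true;
	wake_up_state(my_waiter, TASK_UNINTERRUPTIBLE);
}
#endif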
1648eb23b9fSPeter Zijlstra 
1655eca1c10SIngo Molnar /* Task command name length: */
1661da177e4SLinus Torvalds #define TASK_COMM_LEN			16
1671da177e4SLinus Torvalds 
1683fa0818bSRik van Riel extern cpumask_var_t			cpu_isolated_map;
1693fa0818bSRik van Riel 
1701da177e4SLinus Torvalds extern void scheduler_tick(void);
1711da177e4SLinus Torvalds 
1721da177e4SLinus Torvalds #define	MAX_SCHEDULE_TIMEOUT		LONG_MAX
1735eca1c10SIngo Molnar 
1745eca1c10SIngo Molnar extern long schedule_timeout(long timeout);
1755eca1c10SIngo Molnar extern long schedule_timeout_interruptible(long timeout);
1765eca1c10SIngo Molnar extern long schedule_timeout_killable(long timeout);
1775eca1c10SIngo Molnar extern long schedule_timeout_uninterruptible(long timeout);
1785eca1c10SIngo Molnar extern long schedule_timeout_idle(long timeout);
1791da177e4SLinus Torvalds asmlinkage void schedule(void);
180c5491ea7SThomas Gleixner extern void schedule_preempt_disabled(void);
1811da177e4SLinus Torvalds 
18210ab5643STejun Heo extern int __must_check io_schedule_prepare(void);
18310ab5643STejun Heo extern void io_schedule_finish(int token);
1849cff8adeSNeilBrown extern long io_schedule_timeout(long timeout);
18510ab5643STejun Heo extern void io_schedule(void);
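/*
 * Illustrative sketch (hypothetical example code, not part of this header)
 * of the timeout helpers above: schedule_timeout() expects the caller to
 * have set the task state first, while the _interruptible/_killable/
 * _uninterruptible/_idle variants set it internally and return the number
 * of jiffies left when woken early.
 */
#if 0	/* example only, not compiled */
static void my_delay_example(void)
{
	long remaining;

	/* Sleep ~100ms, waking early if a signal arrives: */
	remaining = schedule_timeout_interruptible(msecs_to_jiffies(100));
	if (remaining)
		pr_info("woken early, %ld jiffies left\n", remaining);

	/* Open-coded equivalent for an uninterruptible sleep: */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(100));
}
#endif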
1869cff8adeSNeilBrown 
187f06febc9SFrank Mayhar /**
1880ba42a59SMasanari Iida  * struct prev_cputime - snapshot of system and user cputime
189d37f761dSFrederic Weisbecker  * @utime: time spent in user mode
190d37f761dSFrederic Weisbecker  * @stime: time spent in system mode
1919d7fb042SPeter Zijlstra  * @lock: protects the above two fields
192d37f761dSFrederic Weisbecker  *
1939d7fb042SPeter Zijlstra  * Stores previous user/system time values such that we can guarantee
1949d7fb042SPeter Zijlstra  * monotonicity.
195d37f761dSFrederic Weisbecker  */
1969d7fb042SPeter Zijlstra struct prev_cputime {
1979d7fb042SPeter Zijlstra #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1985613fda9SFrederic Weisbecker 	u64				utime;
1995613fda9SFrederic Weisbecker 	u64				stime;
2009d7fb042SPeter Zijlstra 	raw_spinlock_t			lock;
2019d7fb042SPeter Zijlstra #endif
202d37f761dSFrederic Weisbecker };
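/*
 * Illustrative sketch of how such a snapshot can be used to keep reported
 * times monotonic (hypothetical, simplified helper, not part of this header;
 * it assumes !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE so the fields exist, and the
 * real adjustment logic in the scheduler is more involved):
 */
#if 0	/* example only, not compiled */
static void my_report_cputime(struct prev_cputime *prev, u64 *utime, u64 *stime)
{
	raw_spin_lock(&prev->lock);
	/* Never report less than what was reported last time: */
	*utime = max(*utime, prev->utime);
	*stime = max(*stime, prev->stime);
	prev->utime = *utime;
	prev->stime = *stime;
	raw_spin_unlock(&prev->lock);
}
#endif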
203d37f761dSFrederic Weisbecker 
204d37f761dSFrederic Weisbecker /**
205f06febc9SFrank Mayhar  * struct task_cputime - collected CPU time counts
2065613fda9SFrederic Weisbecker  * @utime:		time spent in user mode, in nanoseconds
2075613fda9SFrederic Weisbecker  * @stime:		time spent in kernel mode, in nanoseconds
208f06febc9SFrank Mayhar  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
209f06febc9SFrank Mayhar  *
2109d7fb042SPeter Zijlstra  * This structure groups together three kinds of CPU time that are tracked for
2119d7fb042SPeter Zijlstra  * threads and thread groups.  Most things considering CPU time want to group
2129d7fb042SPeter Zijlstra  * these counts together and treat all three of them in parallel.
213f06febc9SFrank Mayhar  */
214f06febc9SFrank Mayhar struct task_cputime {
2155613fda9SFrederic Weisbecker 	u64				utime;
2165613fda9SFrederic Weisbecker 	u64				stime;
217f06febc9SFrank Mayhar 	unsigned long long		sum_exec_runtime;
218f06febc9SFrank Mayhar };
2199d7fb042SPeter Zijlstra 
2205eca1c10SIngo Molnar /* Alternate field names when used on cache expirations: */
221f06febc9SFrank Mayhar #define virt_exp			utime
2229d7fb042SPeter Zijlstra #define prof_exp			stime
223f06febc9SFrank Mayhar #define sched_exp			sum_exec_runtime
224f06febc9SFrank Mayhar 
225bac5b6b6SFrederic Weisbecker enum vtime_state {
226bac5b6b6SFrederic Weisbecker 	/* Task is sleeping or running in a CPU with VTIME inactive: */
227bac5b6b6SFrederic Weisbecker 	VTIME_INACTIVE = 0,
228bac5b6b6SFrederic Weisbecker 	/* Task runs in userspace in a CPU with VTIME active: */
229bac5b6b6SFrederic Weisbecker 	VTIME_USER,
230bac5b6b6SFrederic Weisbecker 	/* Task runs in kernelspace in a CPU with VTIME active: */
231bac5b6b6SFrederic Weisbecker 	VTIME_SYS,
232bac5b6b6SFrederic Weisbecker };
233bac5b6b6SFrederic Weisbecker 
234bac5b6b6SFrederic Weisbecker struct vtime {
235bac5b6b6SFrederic Weisbecker 	seqcount_t		seqcount;
236bac5b6b6SFrederic Weisbecker 	unsigned long long	starttime;
237bac5b6b6SFrederic Weisbecker 	enum vtime_state	state;
2382a42eb95SWanpeng Li 	u64			utime;
2392a42eb95SWanpeng Li 	u64			stime;
2402a42eb95SWanpeng Li 	u64			gtime;
241bac5b6b6SFrederic Weisbecker };
242bac5b6b6SFrederic Weisbecker 
2431da177e4SLinus Torvalds struct sched_info {
2447f5f8e8dSIngo Molnar #ifdef CONFIG_SCHED_INFO
2455eca1c10SIngo Molnar 	/* Cumulative counters: */
2461da177e4SLinus Torvalds 
2475eca1c10SIngo Molnar 	/* # of times we have run on this CPU: */
2485eca1c10SIngo Molnar 	unsigned long			pcount;
2495eca1c10SIngo Molnar 
2505eca1c10SIngo Molnar 	/* Time spent waiting on a runqueue: */
2515eca1c10SIngo Molnar 	unsigned long long		run_delay;
2525eca1c10SIngo Molnar 
2535eca1c10SIngo Molnar 	/* Timestamps: */
2545eca1c10SIngo Molnar 
2555eca1c10SIngo Molnar 	/* When did we last run on a CPU? */
2565eca1c10SIngo Molnar 	unsigned long long		last_arrival;
2575eca1c10SIngo Molnar 
2585eca1c10SIngo Molnar 	/* When were we last queued to run? */
2595eca1c10SIngo Molnar 	unsigned long long		last_queued;
2605eca1c10SIngo Molnar 
261f6db8347SNaveen N. Rao #endif /* CONFIG_SCHED_INFO */
2627f5f8e8dSIngo Molnar };
2631da177e4SLinus Torvalds 
2641da177e4SLinus Torvalds /*
2656ecdd749SYuyang Du  * Integer metrics need fixed point arithmetic, e.g., sched/fair
2666ecdd749SYuyang Du  * has a few: load, load_avg, util_avg, freq, and capacity.
2676ecdd749SYuyang Du  *
2686ecdd749SYuyang Du  * We define a basic fixed point arithmetic range, and then formalize
2696ecdd749SYuyang Du  * all these metrics based on that basic range.
2706ecdd749SYuyang Du  */
2716ecdd749SYuyang Du # define SCHED_FIXEDPOINT_SHIFT		10
2726ecdd749SYuyang Du # define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
2736ecdd749SYuyang Du 
27420b8a59fSIngo Molnar struct load_weight {
2759dbdb155SPeter Zijlstra 	unsigned long			weight;
2769dbdb155SPeter Zijlstra 	u32				inv_weight;
27720b8a59fSIngo Molnar };
27820b8a59fSIngo Molnar 
2799d89c257SYuyang Du /*
2807b595334SYuyang Du  * The load_avg/util_avg accumulates an infinite geometric series
2817b595334SYuyang Du  * (see __update_load_avg() in kernel/sched/fair.c).
2827b595334SYuyang Du  *
2837b595334SYuyang Du  * [load_avg definition]
2847b595334SYuyang Du  *
2857b595334SYuyang Du  *   load_avg = runnable% * scale_load_down(load)
2867b595334SYuyang Du  *
2877b595334SYuyang Du  * where runnable% is the time ratio that a sched_entity is runnable.
2887b595334SYuyang Du  * For cfs_rq, it is the aggregated load_avg of all runnable and
2899d89c257SYuyang Du  * blocked sched_entities.
2907b595334SYuyang Du  *
2917b595334SYuyang Du  * load_avg may also take frequency scaling into account:
2927b595334SYuyang Du  *
2937b595334SYuyang Du  *   load_avg = runnable% * scale_load_down(load) * freq%
2947b595334SYuyang Du  *
2957b595334SYuyang Du  * where freq% is the CPU frequency normalized to the highest frequency.
2967b595334SYuyang Du  *
2977b595334SYuyang Du  * [util_avg definition]
2987b595334SYuyang Du  *
2997b595334SYuyang Du  *   util_avg = running% * SCHED_CAPACITY_SCALE
3007b595334SYuyang Du  *
3017b595334SYuyang Du  * where running% is the time ratio that a sched_entity is running on
3027b595334SYuyang Du  * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
3037b595334SYuyang Du  * and blocked sched_entities.
3047b595334SYuyang Du  *
3057b595334SYuyang Du  * util_avg may also factor frequency scaling and CPU capacity scaling:
3067b595334SYuyang Du  *
3077b595334SYuyang Du  *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
3087b595334SYuyang Du  *
3097b595334SYuyang Du  * where freq% is the same as above, and capacity% is the CPU capacity
3107b595334SYuyang Du  * normalized to the greatest capacity (due to uarch differences, etc).
3117b595334SYuyang Du  *
3127b595334SYuyang Du  * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
3137b595334SYuyang Du  * themselves are in the range of [0, 1]. To do fixed point arithmetic,
3147b595334SYuyang Du  * we therefore scale them to as large a range as necessary. This is for
3157b595334SYuyang Du  * example reflected by util_avg's SCHED_CAPACITY_SCALE.
3167b595334SYuyang Du  *
3177b595334SYuyang Du  * [Overflow issue]
3187b595334SYuyang Du  *
3197b595334SYuyang Du  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
3207b595334SYuyang Du  * with the highest load (=88761), always runnable on a single cfs_rq,
3217b595334SYuyang Du  * and should not overflow as the number already hits PID_MAX_LIMIT.
3227b595334SYuyang Du  *
3237b595334SYuyang Du  * For all other cases (including 32-bit kernels), struct load_weight's
3247b595334SYuyang Du  * weight will overflow first before we do, because:
3257b595334SYuyang Du  *
3267b595334SYuyang Du  *    Max(load_avg) <= Max(load.weight)
3277b595334SYuyang Du  *
3287b595334SYuyang Du  * Then it is the load_weight's responsibility to consider overflow
3297b595334SYuyang Du  * issues.
3309d89c257SYuyang Du  */
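/*
 * Worked example of the definitions above (illustrative numbers only):
 * a nice-0 task has scale_load_down(load) == 1024, so if it is runnable
 * 50% of the time,
 *
 *   load_avg = 0.50 * 1024 = 512
 *
 * and if the CPU additionally runs at half its maximum frequency,
 *
 *   load_avg = 0.50 * 1024 * 0.50 = 256
 *
 * Likewise, a task actually running 25% of the time has
 *
 *   util_avg = 0.25 * SCHED_CAPACITY_SCALE = 0.25 * 1024 = 256
 */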
3319d85f21cSPaul Turner struct sched_avg {
3325eca1c10SIngo Molnar 	u64				last_update_time;
3335eca1c10SIngo Molnar 	u64				load_sum;
3341ea6c46aSPeter Zijlstra 	u64				runnable_load_sum;
3355eca1c10SIngo Molnar 	u32				util_sum;
3365eca1c10SIngo Molnar 	u32				period_contrib;
3375eca1c10SIngo Molnar 	unsigned long			load_avg;
3381ea6c46aSPeter Zijlstra 	unsigned long			runnable_load_avg;
3395eca1c10SIngo Molnar 	unsigned long			util_avg;
3409d85f21cSPaul Turner };
3419d85f21cSPaul Turner 
34241acab88SLucas De Marchi struct sched_statistics {
3437f5f8e8dSIngo Molnar #ifdef CONFIG_SCHEDSTATS
34494c18227SIngo Molnar 	u64				wait_start;
34594c18227SIngo Molnar 	u64				wait_max;
3466d082592SArjan van de Ven 	u64				wait_count;
3476d082592SArjan van de Ven 	u64				wait_sum;
3488f0dfc34SArjan van de Ven 	u64				iowait_count;
3498f0dfc34SArjan van de Ven 	u64				iowait_sum;
35094c18227SIngo Molnar 
35194c18227SIngo Molnar 	u64				sleep_start;
35220b8a59fSIngo Molnar 	u64				sleep_max;
35394c18227SIngo Molnar 	s64				sum_sleep_runtime;
35494c18227SIngo Molnar 
35594c18227SIngo Molnar 	u64				block_start;
35620b8a59fSIngo Molnar 	u64				block_max;
35720b8a59fSIngo Molnar 	u64				exec_max;
358eba1ed4bSIngo Molnar 	u64				slice_max;
359cc367732SIngo Molnar 
360cc367732SIngo Molnar 	u64				nr_migrations_cold;
361cc367732SIngo Molnar 	u64				nr_failed_migrations_affine;
362cc367732SIngo Molnar 	u64				nr_failed_migrations_running;
363cc367732SIngo Molnar 	u64				nr_failed_migrations_hot;
364cc367732SIngo Molnar 	u64				nr_forced_migrations;
365cc367732SIngo Molnar 
366cc367732SIngo Molnar 	u64				nr_wakeups;
367cc367732SIngo Molnar 	u64				nr_wakeups_sync;
368cc367732SIngo Molnar 	u64				nr_wakeups_migrate;
369cc367732SIngo Molnar 	u64				nr_wakeups_local;
370cc367732SIngo Molnar 	u64				nr_wakeups_remote;
371cc367732SIngo Molnar 	u64				nr_wakeups_affine;
372cc367732SIngo Molnar 	u64				nr_wakeups_affine_attempts;
373cc367732SIngo Molnar 	u64				nr_wakeups_passive;
374cc367732SIngo Molnar 	u64				nr_wakeups_idle;
37541acab88SLucas De Marchi #endif
3767f5f8e8dSIngo Molnar };
37741acab88SLucas De Marchi 
37841acab88SLucas De Marchi struct sched_entity {
3795eca1c10SIngo Molnar 	/* For load-balancing: */
3805eca1c10SIngo Molnar 	struct load_weight		load;
3811ea6c46aSPeter Zijlstra 	unsigned long			runnable_weight;
38241acab88SLucas De Marchi 	struct rb_node			run_node;
38341acab88SLucas De Marchi 	struct list_head		group_node;
38441acab88SLucas De Marchi 	unsigned int			on_rq;
38541acab88SLucas De Marchi 
38641acab88SLucas De Marchi 	u64				exec_start;
38741acab88SLucas De Marchi 	u64				sum_exec_runtime;
38841acab88SLucas De Marchi 	u64				vruntime;
38941acab88SLucas De Marchi 	u64				prev_sum_exec_runtime;
39041acab88SLucas De Marchi 
39141acab88SLucas De Marchi 	u64				nr_migrations;
39241acab88SLucas De Marchi 
39341acab88SLucas De Marchi 	struct sched_statistics		statistics;
39494c18227SIngo Molnar 
39520b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
396fed14d45SPeter Zijlstra 	int				depth;
39720b8a59fSIngo Molnar 	struct sched_entity		*parent;
39820b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
39920b8a59fSIngo Molnar 	struct cfs_rq			*cfs_rq;
40020b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
40120b8a59fSIngo Molnar 	struct cfs_rq			*my_q;
40220b8a59fSIngo Molnar #endif
4038bd75c77SClark Williams 
404141965c7SAlex Shi #ifdef CONFIG_SMP
4055a107804SJiri Olsa 	/*
4065a107804SJiri Olsa 	 * Per entity load average tracking.
4075a107804SJiri Olsa 	 *
4085a107804SJiri Olsa 	 * Put into separate cache line so it does not
4095a107804SJiri Olsa 	 * collide with read-mostly values above.
4105a107804SJiri Olsa 	 */
4115a107804SJiri Olsa 	struct sched_avg		avg ____cacheline_aligned_in_smp;
4129d85f21cSPaul Turner #endif
41320b8a59fSIngo Molnar };
41470b97a7fSIngo Molnar 
415fa717060SPeter Zijlstra struct sched_rt_entity {
416fa717060SPeter Zijlstra 	struct list_head		run_list;
41778f2c7dbSPeter Zijlstra 	unsigned long			timeout;
41857d2aa00SYing Xue 	unsigned long			watchdog_stamp;
419bee367edSRichard Kennedy 	unsigned int			time_slice;
420ff77e468SPeter Zijlstra 	unsigned short			on_rq;
421ff77e468SPeter Zijlstra 	unsigned short			on_list;
4226f505b16SPeter Zijlstra 
42358d6c2d7SPeter Zijlstra 	struct sched_rt_entity		*back;
424052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
4256f505b16SPeter Zijlstra 	struct sched_rt_entity		*parent;
4266f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
4276f505b16SPeter Zijlstra 	struct rt_rq			*rt_rq;
4286f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
4296f505b16SPeter Zijlstra 	struct rt_rq			*my_q;
4306f505b16SPeter Zijlstra #endif
4313859a271SKees Cook } __randomize_layout;
432fa717060SPeter Zijlstra 
433aab03e05SDario Faggioli struct sched_dl_entity {
434aab03e05SDario Faggioli 	struct rb_node			rb_node;
435aab03e05SDario Faggioli 
436aab03e05SDario Faggioli 	/*
437aab03e05SDario Faggioli 	 * Original scheduling parameters. Copied here from sched_attr
4384027d080Sxiaofeng.yan 	 * during sched_setattr(), they will remain the same until
4394027d080Sxiaofeng.yan 	 * the next sched_setattr().
440aab03e05SDario Faggioli 	 */
4415eca1c10SIngo Molnar 	u64				dl_runtime;	/* Maximum runtime for each instance	*/
4425eca1c10SIngo Molnar 	u64				dl_deadline;	/* Relative deadline of each instance	*/
4435eca1c10SIngo Molnar 	u64				dl_period;	/* Separation of two instances (period) */
44454d6d303SDaniel Bristot de Oliveira 	u64				dl_bw;		/* dl_runtime / dl_period		*/
4453effcb42SDaniel Bristot de Oliveira 	u64				dl_density;	/* dl_runtime / dl_deadline		*/
446aab03e05SDario Faggioli 
447aab03e05SDario Faggioli 	/*
448aab03e05SDario Faggioli 	 * Actual scheduling parameters. Initialized with the values above,
449aab03e05SDario Faggioli 	 * they are continuously updated during task execution. Note that
450aab03e05SDario Faggioli 	 * the remaining runtime could be < 0 in case we are in overrun.
451aab03e05SDario Faggioli 	 */
4525eca1c10SIngo Molnar 	s64				runtime;	/* Remaining runtime for this instance	*/
4535eca1c10SIngo Molnar 	u64				deadline;	/* Absolute deadline for this instance	*/
4545eca1c10SIngo Molnar 	unsigned int			flags;		/* Specifying the scheduler behaviour	*/
455aab03e05SDario Faggioli 
456aab03e05SDario Faggioli 	/*
457aab03e05SDario Faggioli 	 * Some bool flags:
458aab03e05SDario Faggioli 	 *
459aab03e05SDario Faggioli 	 * @dl_throttled tells if we exhausted the runtime. If so, the
460aab03e05SDario Faggioli 	 * task has to wait for a replenishment to be performed at the
461aab03e05SDario Faggioli 	 * next firing of dl_timer.
462aab03e05SDario Faggioli 	 *
4632d3d891dSDario Faggioli 	 * @dl_boosted tells if we are boosted due to DI. If so we are
4642d3d891dSDario Faggioli 	 * outside bandwidth enforcement mechanism (but only until we
4655bfd126eSJuri Lelli 	 * exit the critical section);
4665bfd126eSJuri Lelli 	 *
4675eca1c10SIngo Molnar 	 * @dl_yielded tells if task gave up the CPU before consuming
4685bfd126eSJuri Lelli 	 * all its available runtime during the last job.
469209a0cbdSLuca Abeni 	 *
470209a0cbdSLuca Abeni 	 * @dl_non_contending tells if the task is inactive while still
471209a0cbdSLuca Abeni 	 * contributing to the active utilization. In other words, it
472209a0cbdSLuca Abeni 	 * indicates if the inactive timer has been armed and its handler
473209a0cbdSLuca Abeni 	 * has not been executed yet. This flag is useful to avoid race
474209a0cbdSLuca Abeni 	 * conditions between the inactive timer handler and the wakeup
475209a0cbdSLuca Abeni 	 * code.
476aab03e05SDario Faggioli 	 */
4775eca1c10SIngo Molnar 	int				dl_throttled;
4785eca1c10SIngo Molnar 	int				dl_boosted;
4795eca1c10SIngo Molnar 	int				dl_yielded;
480209a0cbdSLuca Abeni 	int				dl_non_contending;
481aab03e05SDario Faggioli 
482aab03e05SDario Faggioli 	/*
483aab03e05SDario Faggioli 	 * Bandwidth enforcement timer. Each -deadline task has its
484aab03e05SDario Faggioli 	 * own bandwidth to be enforced, thus we need one timer per task.
485aab03e05SDario Faggioli 	 */
486aab03e05SDario Faggioli 	struct hrtimer			dl_timer;
487209a0cbdSLuca Abeni 
488209a0cbdSLuca Abeni 	/*
489209a0cbdSLuca Abeni 	 * Inactive timer, responsible for decreasing the active utilization
490209a0cbdSLuca Abeni 	 * at the "0-lag time". When a -deadline task blocks, it contributes
491209a0cbdSLuca Abeni 	 * to GRUB's active utilization until the "0-lag time", hence a
492209a0cbdSLuca Abeni 	 * timer is needed to decrease the active utilization at the correct
493209a0cbdSLuca Abeni 	 * time.
494209a0cbdSLuca Abeni 	 */
495209a0cbdSLuca Abeni 	struct hrtimer			inactive_timer;
496aab03e05SDario Faggioli };
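/*
 * Illustrative sketch (hypothetical user-space example, not part of this
 * header) of how the original parameters above are set: they are copied
 * from a struct sched_attr during sched_setattr(), all in nanoseconds,
 * e.g. for a task asking for 10ms of runtime every 100ms:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	// 10 ms
 *		.sched_deadline	= 100 * 1000 * 1000,	// 100 ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100 ms
 *	};
 *
 *	syscall(SYS_sched_setattr, 0, &attr, 0);
 */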
4978bd75c77SClark Williams 
4981d082fd0SPaul E. McKenney union rcu_special {
4991d082fd0SPaul E. McKenney 	struct {
5008203d6d0SPaul E. McKenney 		u8			blocked;
5018203d6d0SPaul E. McKenney 		u8			need_qs;
5028203d6d0SPaul E. McKenney 		u8			exp_need_qs;
5035eca1c10SIngo Molnar 
5045eca1c10SIngo Molnar 		/* Otherwise the compiler can store garbage here: */
5055eca1c10SIngo Molnar 		u8			pad;
5068203d6d0SPaul E. McKenney 	} b; /* Bits. */
5078203d6d0SPaul E. McKenney 	u32 s; /* Set of bits. */
5081d082fd0SPaul E. McKenney };
50986848966SPaul E. McKenney 
5108dc85d54SPeter Zijlstra enum perf_event_task_context {
5118dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
5128dc85d54SPeter Zijlstra 	perf_hw_context = 0,
51389a1e187SPeter Zijlstra 	perf_sw_context,
5148dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
5158dc85d54SPeter Zijlstra };
5168dc85d54SPeter Zijlstra 
517eb61baf6SIngo Molnar struct wake_q_node {
518eb61baf6SIngo Molnar 	struct wake_q_node *next;
519eb61baf6SIngo Molnar };
520eb61baf6SIngo Molnar 
5211da177e4SLinus Torvalds struct task_struct {
522c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
523c65eacbeSAndy Lutomirski 	/*
524c65eacbeSAndy Lutomirski 	 * For reasons of header soup (see current_thread_info()), this
525c65eacbeSAndy Lutomirski 	 * must be the first element of task_struct.
526c65eacbeSAndy Lutomirski 	 */
527c65eacbeSAndy Lutomirski 	struct thread_info		thread_info;
528c65eacbeSAndy Lutomirski #endif
5295eca1c10SIngo Molnar 	/* -1 unrunnable, 0 runnable, >0 stopped: */
5305eca1c10SIngo Molnar 	volatile long			state;
53129e48ce8SKees Cook 
53229e48ce8SKees Cook 	/*
53329e48ce8SKees Cook 	 * This begins the randomizable portion of task_struct. Only
53429e48ce8SKees Cook 	 * scheduling-critical items should be added above here.
53529e48ce8SKees Cook 	 */
53629e48ce8SKees Cook 	randomized_struct_fields_start
53729e48ce8SKees Cook 
538f7e4217bSRoman Zippel 	void				*stack;
5391da177e4SLinus Torvalds 	atomic_t			usage;
5405eca1c10SIngo Molnar 	/* Per task flags (PF_*), defined further below: */
5415eca1c10SIngo Molnar 	unsigned int			flags;
54297dc32cdSWilliam Cohen 	unsigned int			ptrace;
5431da177e4SLinus Torvalds 
5442dd73a4fSPeter Williams #ifdef CONFIG_SMP
545fa14ff4aSPeter Zijlstra 	struct llist_node		wake_entry;
5463ca7a440SPeter Zijlstra 	int				on_cpu;
547c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
5485eca1c10SIngo Molnar 	/* Current CPU: */
5495eca1c10SIngo Molnar 	unsigned int			cpu;
550c65eacbeSAndy Lutomirski #endif
55163b0e9edSMike Galbraith 	unsigned int			wakee_flips;
55262470419SMichael Wang 	unsigned long			wakee_flip_decay_ts;
55363b0e9edSMike Galbraith 	struct task_struct		*last_wakee;
554ac66f547SPeter Zijlstra 
555ac66f547SPeter Zijlstra 	int				wake_cpu;
5564866cde0SNick Piggin #endif
557fd2f4419SPeter Zijlstra 	int				on_rq;
55850e645a8SIngo Molnar 
5595eca1c10SIngo Molnar 	int				prio;
5605eca1c10SIngo Molnar 	int				static_prio;
5615eca1c10SIngo Molnar 	int				normal_prio;
562c7aceabaSRichard Kennedy 	unsigned int			rt_priority;
5635eca1c10SIngo Molnar 
5645522d5d5SIngo Molnar 	const struct sched_class	*sched_class;
56520b8a59fSIngo Molnar 	struct sched_entity		se;
566fa717060SPeter Zijlstra 	struct sched_rt_entity		rt;
5678323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
5688323f26cSPeter Zijlstra 	struct task_group		*sched_task_group;
5698323f26cSPeter Zijlstra #endif
570aab03e05SDario Faggioli 	struct sched_dl_entity		dl;
5711da177e4SLinus Torvalds 
572e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
5735eca1c10SIngo Molnar 	/* List of struct preempt_notifier: */
574e107be36SAvi Kivity 	struct hlist_head		preempt_notifiers;
575e107be36SAvi Kivity #endif
576e107be36SAvi Kivity 
5776c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
5782056a782SJens Axboe 	unsigned int			btrace_seq;
5796c5c9341SAlexey Dobriyan #endif
5801da177e4SLinus Torvalds 
58197dc32cdSWilliam Cohen 	unsigned int			policy;
58229baa747SPeter Zijlstra 	int				nr_cpus_allowed;
5831da177e4SLinus Torvalds 	cpumask_t			cpus_allowed;
5841da177e4SLinus Torvalds 
585a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
586e260be67SPaul E. McKenney 	int				rcu_read_lock_nesting;
5871d082fd0SPaul E. McKenney 	union rcu_special		rcu_read_unlock_special;
588f41d911fSPaul E. McKenney 	struct list_head		rcu_node_entry;
589a57eb940SPaul E. McKenney 	struct rcu_node			*rcu_blocked_node;
59028f6569aSPranith Kumar #endif /* #ifdef CONFIG_PREEMPT_RCU */
5915eca1c10SIngo Molnar 
5928315f422SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
5938315f422SPaul E. McKenney 	unsigned long			rcu_tasks_nvcsw;
594ccdd29ffSPaul E. McKenney 	u8				rcu_tasks_holdout;
595ccdd29ffSPaul E. McKenney 	u8				rcu_tasks_idx;
596176f8f7aSPaul E. McKenney 	int				rcu_tasks_idle_cpu;
597ccdd29ffSPaul E. McKenney 	struct list_head		rcu_tasks_holdout_list;
5988315f422SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RCU */
599e260be67SPaul E. McKenney 
6001da177e4SLinus Torvalds 	struct sched_info		sched_info;
6011da177e4SLinus Torvalds 
6021da177e4SLinus Torvalds 	struct list_head		tasks;
603806c09a7SDario Faggioli #ifdef CONFIG_SMP
604917b627dSGregory Haskins 	struct plist_node		pushable_tasks;
6051baca4ceSJuri Lelli 	struct rb_node			pushable_dl_tasks;
606806c09a7SDario Faggioli #endif
6071da177e4SLinus Torvalds 
6085eca1c10SIngo Molnar 	struct mm_struct		*mm;
6095eca1c10SIngo Molnar 	struct mm_struct		*active_mm;
610314ff785SIngo Molnar 
611314ff785SIngo Molnar 	/* Per-thread vma caching: */
612314ff785SIngo Molnar 	struct vmacache			vmacache;
613314ff785SIngo Molnar 
6145eca1c10SIngo Molnar #ifdef SPLIT_RSS_COUNTING
61534e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat		rss_stat;
61634e55232SKAMEZAWA Hiroyuki #endif
61797dc32cdSWilliam Cohen 	int				exit_state;
6185eca1c10SIngo Molnar 	int				exit_code;
6195eca1c10SIngo Molnar 	int				exit_signal;
6205eca1c10SIngo Molnar 	/* The signal sent when the parent dies: */
6215eca1c10SIngo Molnar 	int				pdeath_signal;
6225eca1c10SIngo Molnar 	/* JOBCTL_*, siglock protected: */
6235eca1c10SIngo Molnar 	unsigned long			jobctl;
6249b89f6baSAndrei Epure 
6255eca1c10SIngo Molnar 	/* Used for emulating ABI behavior of previous Linux versions: */
62697dc32cdSWilliam Cohen 	unsigned int			personality;
6279b89f6baSAndrei Epure 
6285eca1c10SIngo Molnar 	/* Scheduler bits, serialized by scheduler locks: */
629ca94c442SLennart Poettering 	unsigned			sched_reset_on_fork:1;
630a8e4f2eaSPeter Zijlstra 	unsigned			sched_contributes_to_load:1;
631ff303e66SPeter Zijlstra 	unsigned			sched_migrated:1;
632b7e7ade3SPeter Zijlstra 	unsigned			sched_remote_wakeup:1;
6335eca1c10SIngo Molnar 	/* Force alignment to the next boundary: */
6345eca1c10SIngo Molnar 	unsigned			:0;
635be958bdcSPeter Zijlstra 
6365eca1c10SIngo Molnar 	/* Unserialized, strictly 'current' */
6375eca1c10SIngo Molnar 
6385eca1c10SIngo Molnar 	/* Bit to tell LSMs we're in execve(): */
6395eca1c10SIngo Molnar 	unsigned			in_execve:1;
640be958bdcSPeter Zijlstra 	unsigned			in_iowait:1;
6415eca1c10SIngo Molnar #ifndef TIF_RESTORE_SIGMASK
6427e781418SAndy Lutomirski 	unsigned			restore_sigmask:1;
6437e781418SAndy Lutomirski #endif
644626ebc41STejun Heo #ifdef CONFIG_MEMCG
645626ebc41STejun Heo 	unsigned			memcg_may_oom:1;
646127424c8SJohannes Weiner #ifndef CONFIG_SLOB
6476f185c29SVladimir Davydov 	unsigned			memcg_kmem_skip_account:1;
6486f185c29SVladimir Davydov #endif
649127424c8SJohannes Weiner #endif
650ff303e66SPeter Zijlstra #ifdef CONFIG_COMPAT_BRK
651ff303e66SPeter Zijlstra 	unsigned			brk_randomized:1;
652ff303e66SPeter Zijlstra #endif
65377f88796STejun Heo #ifdef CONFIG_CGROUPS
65477f88796STejun Heo 	/* disallow userland-initiated cgroup migration */
65577f88796STejun Heo 	unsigned			no_cgroup_migration:1;
65677f88796STejun Heo #endif
6576f185c29SVladimir Davydov 
6585eca1c10SIngo Molnar 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
6591d4457f9SKees Cook 
660f56141e3SAndy Lutomirski 	struct restart_block		restart_block;
661f56141e3SAndy Lutomirski 
6621da177e4SLinus Torvalds 	pid_t				pid;
6631da177e4SLinus Torvalds 	pid_t				tgid;
6640a425405SArjan van de Ven 
6651314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
6665eca1c10SIngo Molnar 	/* Canary value for the -fstack-protector GCC feature: */
6670a425405SArjan van de Ven 	unsigned long			stack_canary;
6681314562aSHiroshi Shimamoto #endif
6691da177e4SLinus Torvalds 	/*
6705eca1c10SIngo Molnar 	 * Pointers to the (original) parent process, youngest child, younger sibling,
6711da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
672f470021aSRoland McGrath 	 * p->real_parent->pid)
6731da177e4SLinus Torvalds 	 */
6745eca1c10SIngo Molnar 
6755eca1c10SIngo Molnar 	/* Real parent process: */
6765eca1c10SIngo Molnar 	struct task_struct __rcu	*real_parent;
6775eca1c10SIngo Molnar 
6785eca1c10SIngo Molnar 	/* Recipient of SIGCHLD, wait4() reports: */
6795eca1c10SIngo Molnar 	struct task_struct __rcu	*parent;
6801da177e4SLinus Torvalds 
681f470021aSRoland McGrath 	/*
6825eca1c10SIngo Molnar 	 * Children/sibling form the list of natural children:
6835eca1c10SIngo Molnar 	 */
6845eca1c10SIngo Molnar 	struct list_head		children;
6855eca1c10SIngo Molnar 	struct list_head		sibling;
6865eca1c10SIngo Molnar 	struct task_struct		*group_leader;
6875eca1c10SIngo Molnar 
6885eca1c10SIngo Molnar 	/*
6895eca1c10SIngo Molnar 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
6905eca1c10SIngo Molnar 	 *
691f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
6925eca1c10SIngo Molnar 	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
693f470021aSRoland McGrath 	 */
694f470021aSRoland McGrath 	struct list_head		ptraced;
695f470021aSRoland McGrath 	struct list_head		ptrace_entry;
696f470021aSRoland McGrath 
6971da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
69892476d7fSEric W. Biederman 	struct pid_link			pids[PIDTYPE_MAX];
69947e65328SOleg Nesterov 	struct list_head		thread_group;
7000c740d0aSOleg Nesterov 	struct list_head		thread_node;
7011da177e4SLinus Torvalds 
7025eca1c10SIngo Molnar 	struct completion		*vfork_done;
7031da177e4SLinus Torvalds 
7045eca1c10SIngo Molnar 	/* CLONE_CHILD_SETTID: */
7055eca1c10SIngo Molnar 	int __user			*set_child_tid;
7065eca1c10SIngo Molnar 
7075eca1c10SIngo Molnar 	/* CLONE_CHILD_CLEARTID: */
7085eca1c10SIngo Molnar 	int __user			*clear_child_tid;
7095eca1c10SIngo Molnar 
7105eca1c10SIngo Molnar 	u64				utime;
7115eca1c10SIngo Molnar 	u64				stime;
71240565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
7135eca1c10SIngo Molnar 	u64				utimescaled;
7145eca1c10SIngo Molnar 	u64				stimescaled;
71540565b5aSStanislaw Gruszka #endif
71616a6d9beSFrederic Weisbecker 	u64				gtime;
7179d7fb042SPeter Zijlstra 	struct prev_cputime		prev_cputime;
7186a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
719bac5b6b6SFrederic Weisbecker 	struct vtime			vtime;
7206a61671bSFrederic Weisbecker #endif
721d027d45dSFrederic Weisbecker 
722d027d45dSFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
723f009a7a7SFrederic Weisbecker 	atomic_t			tick_dep_mask;
724d027d45dSFrederic Weisbecker #endif
7255eca1c10SIngo Molnar 	/* Context switch counts: */
7265eca1c10SIngo Molnar 	unsigned long			nvcsw;
7275eca1c10SIngo Molnar 	unsigned long			nivcsw;
7285eca1c10SIngo Molnar 
7295eca1c10SIngo Molnar 	/* Monotonic time in nsecs: */
7305eca1c10SIngo Molnar 	u64				start_time;
7315eca1c10SIngo Molnar 
7325eca1c10SIngo Molnar 	/* Boot based time in nsecs: */
7335eca1c10SIngo Molnar 	u64				real_start_time;
7345eca1c10SIngo Molnar 
7355eca1c10SIngo Molnar 	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
7365eca1c10SIngo Molnar 	unsigned long			min_flt;
7375eca1c10SIngo Molnar 	unsigned long			maj_flt;
7381da177e4SLinus Torvalds 
739b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS
740f06febc9SFrank Mayhar 	struct task_cputime		cputime_expires;
7411da177e4SLinus Torvalds 	struct list_head		cpu_timers[3];
742b18b6a9cSNicolas Pitre #endif
7431da177e4SLinus Torvalds 
7445eca1c10SIngo Molnar 	/* Process credentials: */
7455eca1c10SIngo Molnar 
7465eca1c10SIngo Molnar 	/* Tracer's credentials at attach: */
7475eca1c10SIngo Molnar 	const struct cred __rcu		*ptracer_cred;
7485eca1c10SIngo Molnar 
7495eca1c10SIngo Molnar 	/* Objective and real subjective task credentials (COW): */
7505eca1c10SIngo Molnar 	const struct cred __rcu		*real_cred;
7515eca1c10SIngo Molnar 
7525eca1c10SIngo Molnar 	/* Effective (overridable) subjective task credentials (COW): */
7535eca1c10SIngo Molnar 	const struct cred __rcu		*cred;
7545eca1c10SIngo Molnar 
7555eca1c10SIngo Molnar 	/*
7565eca1c10SIngo Molnar 	 * executable name, excluding path.
7575eca1c10SIngo Molnar 	 *
7585eca1c10SIngo Molnar 	 * - normally initialized by setup_new_exec()
7595eca1c10SIngo Molnar 	 * - access it with [gs]et_task_comm()
7605eca1c10SIngo Molnar 	 * - lock it with task_lock()
7615eca1c10SIngo Molnar 	 */
7625eca1c10SIngo Molnar 	char				comm[TASK_COMM_LEN];
7635eca1c10SIngo Molnar 
764756daf26SNeilBrown 	struct nameidata		*nameidata;
7655eca1c10SIngo Molnar 
7663d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
7671da177e4SLinus Torvalds 	struct sysv_sem			sysvsem;
768ab602f79SJack Miller 	struct sysv_shm			sysvshm;
7693d5b6fccSAlexey Dobriyan #endif
770e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
77182a1fcb9SIngo Molnar 	unsigned long			last_switch_count;
77282a1fcb9SIngo Molnar #endif
7735eca1c10SIngo Molnar 	/* Filesystem information: */
7741da177e4SLinus Torvalds 	struct fs_struct		*fs;
7755eca1c10SIngo Molnar 
7765eca1c10SIngo Molnar 	/* Open file information: */
7771da177e4SLinus Torvalds 	struct files_struct		*files;
7785eca1c10SIngo Molnar 
7795eca1c10SIngo Molnar 	/* Namespaces: */
780ab516013SSerge E. Hallyn 	struct nsproxy			*nsproxy;
7815eca1c10SIngo Molnar 
7825eca1c10SIngo Molnar 	/* Signal handlers: */
7831da177e4SLinus Torvalds 	struct signal_struct		*signal;
7841da177e4SLinus Torvalds 	struct sighand_struct		*sighand;
7855eca1c10SIngo Molnar 	sigset_t			blocked;
7865eca1c10SIngo Molnar 	sigset_t			real_blocked;
7875eca1c10SIngo Molnar 	/* Restored if set_restore_sigmask() was used: */
7885eca1c10SIngo Molnar 	sigset_t			saved_sigmask;
7891da177e4SLinus Torvalds 	struct sigpending		pending;
7901da177e4SLinus Torvalds 	unsigned long			sas_ss_sp;
7911da177e4SLinus Torvalds 	size_t				sas_ss_size;
7925eca1c10SIngo Molnar 	unsigned int			sas_ss_flags;
7932e01fabeSOleg Nesterov 
79467d12145SAl Viro 	struct callback_head		*task_works;
795e73f8959SOleg Nesterov 
7961da177e4SLinus Torvalds 	struct audit_context		*audit_context;
797bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
798e1760bd5SEric W. Biederman 	kuid_t				loginuid;
7994746ec5bSEric Paris 	unsigned int			sessionid;
800bfef93a5SAl Viro #endif
801932ecebbSWill Drewry 	struct seccomp			seccomp;
8021da177e4SLinus Torvalds 
8035eca1c10SIngo Molnar 	/* Thread group tracking: */
8041da177e4SLinus Torvalds 	u32				parent_exec_id;
8051da177e4SLinus Torvalds 	u32				self_exec_id;
8065eca1c10SIngo Molnar 
8075eca1c10SIngo Molnar 	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
8081da177e4SLinus Torvalds 	spinlock_t			alloc_lock;
8091da177e4SLinus Torvalds 
810b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
8111d615482SThomas Gleixner 	raw_spinlock_t			pi_lock;
812b29739f9SIngo Molnar 
81376751049SPeter Zijlstra 	struct wake_q_node		wake_q;
81476751049SPeter Zijlstra 
81523f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
8165eca1c10SIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task: */
817a23ba907SDavidlohr Bueso 	struct rb_root_cached		pi_waiters;
818e96a7705SXunlei Pang 	/* Updated under owner's pi_lock and rq lock */
819e96a7705SXunlei Pang 	struct task_struct		*pi_top_task;
8205eca1c10SIngo Molnar 	/* Deadlock detection and priority inheritance handling: */
82123f78d4aSIngo Molnar 	struct rt_mutex_waiter		*pi_blocked_on;
82223f78d4aSIngo Molnar #endif
82323f78d4aSIngo Molnar 
824408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
8255eca1c10SIngo Molnar 	/* Mutex deadlock detection: */
826408894eeSIngo Molnar 	struct mutex_waiter		*blocked_on;
827408894eeSIngo Molnar #endif
8285eca1c10SIngo Molnar 
829de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
830de30a2b3SIngo Molnar 	unsigned int			irq_events;
831de30a2b3SIngo Molnar 	unsigned long			hardirq_enable_ip;
832de30a2b3SIngo Molnar 	unsigned long			hardirq_disable_ip;
833fa1452e8SHiroshi Shimamoto 	unsigned int			hardirq_enable_event;
834de30a2b3SIngo Molnar 	unsigned int			hardirq_disable_event;
835fa1452e8SHiroshi Shimamoto 	int				hardirqs_enabled;
836de30a2b3SIngo Molnar 	int				hardirq_context;
837fa1452e8SHiroshi Shimamoto 	unsigned long			softirq_disable_ip;
838fa1452e8SHiroshi Shimamoto 	unsigned long			softirq_enable_ip;
839fa1452e8SHiroshi Shimamoto 	unsigned int			softirq_disable_event;
840fa1452e8SHiroshi Shimamoto 	unsigned int			softirq_enable_event;
841fa1452e8SHiroshi Shimamoto 	int				softirqs_enabled;
842de30a2b3SIngo Molnar 	int				softirq_context;
843de30a2b3SIngo Molnar #endif
8445eca1c10SIngo Molnar 
845fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
846bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH			48UL
847fbb9ce95SIngo Molnar 	u64				curr_chain_key;
848fbb9ce95SIngo Molnar 	int				lockdep_depth;
849fbb9ce95SIngo Molnar 	unsigned int			lockdep_recursion;
850c7aceabaSRichard Kennedy 	struct held_lock		held_locks[MAX_LOCK_DEPTH];
851fbb9ce95SIngo Molnar #endif
8525eca1c10SIngo Molnar 
853b09be676SByungchul Park #ifdef CONFIG_LOCKDEP_CROSSRELEASE
854b09be676SByungchul Park #define MAX_XHLOCKS_NR 64UL
855b09be676SByungchul Park 	struct hist_lock *xhlocks; /* Crossrelease history locks */
856b09be676SByungchul Park 	unsigned int xhlock_idx;
857b09be676SByungchul Park 	/* For restoring at history boundaries */
858b09be676SByungchul Park 	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
85923f873d8SByungchul Park 	unsigned int hist_id;
86023f873d8SByungchul Park 	/* For overwrite check at each context exit */
86123f873d8SByungchul Park 	unsigned int hist_id_save[XHLOCK_CTX_NR];
8621da177e4SLinus Torvalds #endif
8631da177e4SLinus Torvalds 
864c6d30853SAndrey Ryabinin #ifdef CONFIG_UBSAN
865c6d30853SAndrey Ryabinin 	unsigned int			in_ubsan;
866c6d30853SAndrey Ryabinin #endif
8671da177e4SLinus Torvalds 
8685eca1c10SIngo Molnar 	/* Journalling filesystem info: */
8691da177e4SLinus Torvalds 	void				*journal_info;
8701da177e4SLinus Torvalds 
8715eca1c10SIngo Molnar 	/* Stacked block device info: */
872bddd87c7SAkinobu Mita 	struct bio_list			*bio_list;
873d89d8796SNeil Brown 
87473c10101SJens Axboe #ifdef CONFIG_BLOCK
8755eca1c10SIngo Molnar 	/* Stack plugging: */
87673c10101SJens Axboe 	struct blk_plug			*plug;
87773c10101SJens Axboe #endif
87873c10101SJens Axboe 
8795eca1c10SIngo Molnar 	/* VM state: */
8801da177e4SLinus Torvalds 	struct reclaim_state		*reclaim_state;
8811da177e4SLinus Torvalds 
8821da177e4SLinus Torvalds 	struct backing_dev_info		*backing_dev_info;
8831da177e4SLinus Torvalds 
8841da177e4SLinus Torvalds 	struct io_context		*io_context;
8851da177e4SLinus Torvalds 
8865eca1c10SIngo Molnar 	/* Ptrace state: */
8871da177e4SLinus Torvalds 	unsigned long			ptrace_message;
8885eca1c10SIngo Molnar 	siginfo_t			*last_siginfo;
8895eca1c10SIngo Molnar 
8907c3ab738SAndrew Morton 	struct task_io_accounting	ioac;
8915eca1c10SIngo Molnar #ifdef CONFIG_TASK_XACCT
8925eca1c10SIngo Molnar 	/* Accumulated RSS usage: */
8935eca1c10SIngo Molnar 	u64				acct_rss_mem1;
8945eca1c10SIngo Molnar 	/* Accumulated virtual memory usage: */
8955eca1c10SIngo Molnar 	u64				acct_vm_mem1;
8965eca1c10SIngo Molnar 	/* stime + utime since last update: */
8975eca1c10SIngo Molnar 	u64				acct_timexpd;
8981da177e4SLinus Torvalds #endif
8991da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
9005eca1c10SIngo Molnar 	/* Protected by ->alloc_lock: */
9015eca1c10SIngo Molnar 	nodemask_t			mems_allowed;
9025eca1c10SIngo Molnar 	/* Sequence number to catch updates: */
9035eca1c10SIngo Molnar 	seqcount_t			mems_allowed_seq;
904825a46afSPaul Jackson 	int				cpuset_mem_spread_rotor;
9056adef3ebSJack Steiner 	int				cpuset_slab_spread_rotor;
9061da177e4SLinus Torvalds #endif
907ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
9085eca1c10SIngo Molnar 	/* Control Group info protected by css_set_lock: */
9092c392b8cSArnd Bergmann 	struct css_set __rcu		*cgroups;
9105eca1c10SIngo Molnar 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
911817929ecSPaul Menage 	struct list_head		cg_list;
912ddbcc7e8SPaul Menage #endif
913f01d7d51SVikas Shivappa #ifdef CONFIG_INTEL_RDT
9140734ded1SVikas Shivappa 	u32				closid;
915d6aaba61SVikas Shivappa 	u32				rmid;
916e02737d5SFenghua Yu #endif
91742b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
9180771dfefSIngo Molnar 	struct robust_list_head __user	*robust_list;
91934f192c6SIngo Molnar #ifdef CONFIG_COMPAT
92034f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
92134f192c6SIngo Molnar #endif
922c87e2837SIngo Molnar 	struct list_head		pi_state_list;
923c87e2837SIngo Molnar 	struct futex_pi_state		*pi_state_cache;
92442b2dd0aSAlexey Dobriyan #endif
925cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
9268dc85d54SPeter Zijlstra 	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
927cdd6c482SIngo Molnar 	struct mutex			perf_event_mutex;
928cdd6c482SIngo Molnar 	struct list_head		perf_event_list;
929a63eaf34SPaul Mackerras #endif
9308f47b187SThomas Gleixner #ifdef CONFIG_DEBUG_PREEMPT
9318f47b187SThomas Gleixner 	unsigned long			preempt_disable_ip;
9328f47b187SThomas Gleixner #endif
933c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
9345eca1c10SIngo Molnar 	/* Protected by alloc_lock: */
9355eca1c10SIngo Molnar 	struct mempolicy		*mempolicy;
93645816682SVlastimil Babka 	short				il_prev;
937207205a2SEric Dumazet 	short				pref_node_fork;
938c7aceabaSRichard Kennedy #endif
939cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
940cbee9f88SPeter Zijlstra 	int				numa_scan_seq;
941cbee9f88SPeter Zijlstra 	unsigned int			numa_scan_period;
942598f0ec0SMel Gorman 	unsigned int			numa_scan_period_max;
943de1c9ce6SRik van Riel 	int				numa_preferred_nid;
9446b9a7460SMel Gorman 	unsigned long			numa_migrate_retry;
9455eca1c10SIngo Molnar 	/* Migration stamp: */
9465eca1c10SIngo Molnar 	u64				node_stamp;
9477e2703e6SRik van Riel 	u64				last_task_numa_placement;
9487e2703e6SRik van Riel 	u64				last_sum_exec_runtime;
949cbee9f88SPeter Zijlstra 	struct callback_head		numa_work;
950f809ca9aSMel Gorman 
9518c8a743cSPeter Zijlstra 	struct list_head		numa_entry;
9528c8a743cSPeter Zijlstra 	struct numa_group		*numa_group;
9538c8a743cSPeter Zijlstra 
954745d6147SMel Gorman 	/*
95544dba3d5SIulia Manda 	 * numa_faults is an array split into four regions:
95644dba3d5SIulia Manda 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
95744dba3d5SIulia Manda 	 * in this precise order.
95844dba3d5SIulia Manda 	 *
95944dba3d5SIulia Manda 	 * faults_memory: Exponential decaying average of faults on a per-node
96044dba3d5SIulia Manda 	 * basis. Scheduling placement decisions are made based on these
96144dba3d5SIulia Manda 	 * counts. The values remain static for the duration of a PTE scan.
96244dba3d5SIulia Manda 	 * faults_cpu: Track the nodes the process was running on when a NUMA
96344dba3d5SIulia Manda 	 * hinting fault was incurred.
96444dba3d5SIulia Manda 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
96544dba3d5SIulia Manda 	 * during the current scan window. When the scan completes, the counts
96644dba3d5SIulia Manda 	 * in faults_memory and faults_cpu decay and these values are copied.
967745d6147SMel Gorman 	 */
96844dba3d5SIulia Manda 	unsigned long			*numa_faults;
96983e1d2cdSMel Gorman 	unsigned long			total_numa_faults;
970745d6147SMel Gorman 
971745d6147SMel Gorman 	/*
97204bb2f94SRik van Riel 	 * numa_faults_locality tracks if faults recorded during the last
973074c2381SMel Gorman 	 * scan window were remote/local or failed to migrate. The task scan
974074c2381SMel Gorman 	 * period is adapted based on the locality of the faults with different
975074c2381SMel Gorman 	 * weights depending on whether they were shared or private faults
97604bb2f94SRik van Riel 	 * weights depending on whether they were shared or private faults.
977074c2381SMel Gorman 	unsigned long			numa_faults_locality[3];
97804bb2f94SRik van Riel 
979b32e86b4SIngo Molnar 	unsigned long			numa_pages_migrated;
980cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
981cbee9f88SPeter Zijlstra 
98272b252aeSMel Gorman 	struct tlbflush_unmap_batch	tlb_ubc;
98372b252aeSMel Gorman 
984e56d0903SIngo Molnar 	struct rcu_head			rcu;
985b92ce558SJens Axboe 
9865eca1c10SIngo Molnar 	/* Cache last used pipe for splice(): */
987b92ce558SJens Axboe 	struct pipe_inode_info		*splice_pipe;
9885640f768SEric Dumazet 
9895640f768SEric Dumazet 	struct page_frag		task_frag;
9905640f768SEric Dumazet 
991ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT
992ca74e92bSShailabh Nagar 	struct task_delay_info		*delays;
993ca74e92bSShailabh Nagar #endif
99447913d4eSIngo Molnar 
995f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
996f4f154fdSAkinobu Mita 	int				make_it_fail;
9979049f2f6SAkinobu Mita 	unsigned int			fail_nth;
998f4f154fdSAkinobu Mita #endif
9999d823e8fSWu Fengguang 	/*
10005eca1c10SIngo Molnar 	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
10015eca1c10SIngo Molnar 	 * balance_dirty_pages() for a dirty throttling pause:
10029d823e8fSWu Fengguang 	 */
10039d823e8fSWu Fengguang 	int				nr_dirtied;
10049d823e8fSWu Fengguang 	int				nr_dirtied_pause;
10055eca1c10SIngo Molnar 	/* Start of a write-and-pause period: */
10065eca1c10SIngo Molnar 	unsigned long			dirty_paused_when;
10079d823e8fSWu Fengguang 
10089745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
10099745512cSArjan van de Ven 	int				latency_record_count;
10109745512cSArjan van de Ven 	struct latency_record		latency_record[LT_SAVECOUNT];
10119745512cSArjan van de Ven #endif
10126976675dSArjan van de Ven 	/*
10135eca1c10SIngo Molnar 	 * Time slack values; these are used to round up poll() and
10146976675dSArjan van de Ven 	 * select() etc timeout values. These are in nanoseconds.
10156976675dSArjan van de Ven 	 */
1016da8b44d5SJohn Stultz 	u64				timer_slack_ns;
1017da8b44d5SJohn Stultz 	u64				default_timer_slack_ns;
1018f8d570a4SDavid Miller 
10190b24beccSAndrey Ryabinin #ifdef CONFIG_KASAN
10200b24beccSAndrey Ryabinin 	unsigned int			kasan_depth;
10210b24beccSAndrey Ryabinin #endif
10225eca1c10SIngo Molnar 
1023fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10245eca1c10SIngo Molnar 	/* Index of current stored address in ret_stack: */
1025f201ae23SFrederic Weisbecker 	int				curr_ret_stack;
10265eca1c10SIngo Molnar 
10275eca1c10SIngo Molnar 	/* Stack of return addresses for return function tracing: */
1028f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack		*ret_stack;
10295eca1c10SIngo Molnar 
10305eca1c10SIngo Molnar 	/* Timestamp for last schedule: */
10318aef2d28SSteven Rostedt 	unsigned long long		ftrace_timestamp;
10325eca1c10SIngo Molnar 
1033f201ae23SFrederic Weisbecker 	/*
1034f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
10355eca1c10SIngo Molnar 	 * because of depth overrun:
1036f201ae23SFrederic Weisbecker 	 */
1037f201ae23SFrederic Weisbecker 	atomic_t			trace_overrun;
10385eca1c10SIngo Molnar 
10395eca1c10SIngo Molnar 	/* Pause tracing: */
1040380c4b14SFrederic Weisbecker 	atomic_t			tracing_graph_pause;
1041f201ae23SFrederic Weisbecker #endif
10425eca1c10SIngo Molnar 
1043ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
10445eca1c10SIngo Molnar 	/* State flags for use by tracers: */
1045ea4e2bc4SSteven Rostedt 	unsigned long			trace;
10465eca1c10SIngo Molnar 
10475eca1c10SIngo Molnar 	/* Bitmask and counter of trace recursion: */
1048261842b7SSteven Rostedt 	unsigned long			trace_recursion;
1049261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
10505eca1c10SIngo Molnar 
10515c9a8750SDmitry Vyukov #ifdef CONFIG_KCOV
10525eca1c10SIngo Molnar 	/* Coverage collection mode enabled for this task (0 if disabled): */
10535c9a8750SDmitry Vyukov 	enum kcov_mode			kcov_mode;
10545eca1c10SIngo Molnar 
10555eca1c10SIngo Molnar 	/* Size of the kcov_area: */
10565eca1c10SIngo Molnar 	unsigned int			kcov_size;
10575eca1c10SIngo Molnar 
10585eca1c10SIngo Molnar 	/* Buffer for coverage collection: */
10595c9a8750SDmitry Vyukov 	void				*kcov_area;
10605eca1c10SIngo Molnar 
10615eca1c10SIngo Molnar 	/* KCOV descriptor wired with this task or NULL: */
10625c9a8750SDmitry Vyukov 	struct kcov			*kcov;
10635c9a8750SDmitry Vyukov #endif
10645eca1c10SIngo Molnar 
10656f185c29SVladimir Davydov #ifdef CONFIG_MEMCG
1066626ebc41STejun Heo 	struct mem_cgroup		*memcg_in_oom;
1067626ebc41STejun Heo 	gfp_t				memcg_oom_gfp_mask;
1068626ebc41STejun Heo 	int				memcg_oom_order;
1069b23afb93STejun Heo 
10705eca1c10SIngo Molnar 	/* Number of pages to reclaim on returning to userland: */
1071b23afb93STejun Heo 	unsigned int			memcg_nr_pages_over_high;
1072569b846dSKAMEZAWA Hiroyuki #endif
10735eca1c10SIngo Molnar 
10740326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
10750326f5a9SSrikar Dronamraju 	struct uprobe_task		*utask;
10760326f5a9SSrikar Dronamraju #endif
1077cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1078cafe5635SKent Overstreet 	unsigned int			sequential_io;
1079cafe5635SKent Overstreet 	unsigned int			sequential_io_avg;
1080cafe5635SKent Overstreet #endif
10818eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
10828eb23b9fSPeter Zijlstra 	unsigned long			task_state_change;
10838eb23b9fSPeter Zijlstra #endif
10848bcbde54SDavid Hildenbrand 	int				pagefault_disabled;
108503049269SMichal Hocko #ifdef CONFIG_MMU
108629c696e1SVladimir Davydov 	struct task_struct		*oom_reaper_list;
108703049269SMichal Hocko #endif
1088ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK
1089ba14a194SAndy Lutomirski 	struct vm_struct		*stack_vm_area;
1090ba14a194SAndy Lutomirski #endif
109168f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
10925eca1c10SIngo Molnar 	/* A live task holds one reference: */
109368f24b08SAndy Lutomirski 	atomic_t			stack_refcount;
109468f24b08SAndy Lutomirski #endif
1095d83a7cb3SJosh Poimboeuf #ifdef CONFIG_LIVEPATCH
1096d83a7cb3SJosh Poimboeuf 	int				patch_state;
1097d83a7cb3SJosh Poimboeuf #endif
1098e4e55b47STetsuo Handa #ifdef CONFIG_SECURITY
1099e4e55b47STetsuo Handa 	/* Used by LSM modules for access restriction: */
1100e4e55b47STetsuo Handa 	void				*security;
1101e4e55b47STetsuo Handa #endif
110229e48ce8SKees Cook 
110329e48ce8SKees Cook 	/*
110429e48ce8SKees Cook 	 * New fields for task_struct should be added above here, so that
110529e48ce8SKees Cook 	 * they are included in the randomized portion of task_struct.
110629e48ce8SKees Cook 	 */
110729e48ce8SKees Cook 	randomized_struct_fields_end
110829e48ce8SKees Cook 
11095eca1c10SIngo Molnar 	/* CPU-specific state of this task: */
11100c8c0f03SDave Hansen 	struct thread_struct		thread;
11115eca1c10SIngo Molnar 
11120c8c0f03SDave Hansen 	/*
11130c8c0f03SDave Hansen 	 * WARNING: on x86, 'thread_struct' contains a variable-sized
11140c8c0f03SDave Hansen 	 * structure.  It *MUST* be at the end of 'task_struct'.
11150c8c0f03SDave Hansen 	 *
11160c8c0f03SDave Hansen 	 * Do not put anything below here!
11170c8c0f03SDave Hansen 	 */
11181da177e4SLinus Torvalds };
11191da177e4SLinus Torvalds 
1120e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
112122c935f4SEric W. Biederman {
112222c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
112322c935f4SEric W. Biederman }
112422c935f4SEric W. Biederman 
1125e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
112622c935f4SEric W. Biederman {
112722c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
112822c935f4SEric W. Biederman }
112922c935f4SEric W. Biederman 
11306dda81f4SOleg Nesterov /*
11315eca1c10SIngo Molnar  * Without tasklist or RCU lock it is not safe to dereference
11326dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current;
11336dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
11346dda81f4SOleg Nesterov  */
1135e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
113622c935f4SEric W. Biederman {
113722c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
113822c935f4SEric W. Biederman }
113922c935f4SEric W. Biederman 
1140e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
114122c935f4SEric W. Biederman {
114222c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
114322c935f4SEric W. Biederman }
114422c935f4SEric W. Biederman 
11457af57294SPavel Emelyanov /*
11467af57294SPavel Emelyanov  * Helpers to get the task's different pids as they are seen
11477af57294SPavel Emelyanov  * from various namespaces:
11487af57294SPavel Emelyanov  *
11497af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
115044c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace
115144c4e1b2SEric W. Biederman  *                     of current;
11527af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the pid namespace specified;
11537af57294SPavel Emelyanov  *
11547af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
11557af57294SPavel Emelyanov  */
11565eca1c10SIngo Molnar pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
11577af57294SPavel Emelyanov 
1158e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
11597af57294SPavel Emelyanov {
11607af57294SPavel Emelyanov 	return tsk->pid;
11617af57294SPavel Emelyanov }
11627af57294SPavel Emelyanov 
11635eca1c10SIngo Molnar static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
116452ee2dfdSOleg Nesterov {
116552ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
116652ee2dfdSOleg Nesterov }
11677af57294SPavel Emelyanov 
11687af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
11697af57294SPavel Emelyanov {
117052ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
11717af57294SPavel Emelyanov }
11727af57294SPavel Emelyanov 
11737af57294SPavel Emelyanov 
1174e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
11757af57294SPavel Emelyanov {
11767af57294SPavel Emelyanov 	return tsk->tgid;
11777af57294SPavel Emelyanov }
11787af57294SPavel Emelyanov 
11795eca1c10SIngo Molnar /**
11805eca1c10SIngo Molnar  * pid_alive - check that a task structure is not stale
11815eca1c10SIngo Molnar  * @p: Task structure to be checked.
11825eca1c10SIngo Molnar  *
11835eca1c10SIngo Molnar  * Test whether a process is not yet dead (at most in zombie state).
11845eca1c10SIngo Molnar  * If pid_alive fails, then pointers within the task structure
11855eca1c10SIngo Molnar  * can be stale and must not be dereferenced.
11865eca1c10SIngo Molnar  *
11875eca1c10SIngo Molnar  * Return: 1 if the process is alive. 0 otherwise.
11885eca1c10SIngo Molnar  */
11895eca1c10SIngo Molnar static inline int pid_alive(const struct task_struct *p)
11905eca1c10SIngo Molnar {
11915eca1c10SIngo Molnar 	return p->pids[PIDTYPE_PID].pid != NULL;
11925eca1c10SIngo Molnar }
11937af57294SPavel Emelyanov 
11945eca1c10SIngo Molnar static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
11957af57294SPavel Emelyanov {
119652ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
11977af57294SPavel Emelyanov }
11987af57294SPavel Emelyanov 
11997af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
12007af57294SPavel Emelyanov {
120152ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
12027af57294SPavel Emelyanov }
12037af57294SPavel Emelyanov 
12047af57294SPavel Emelyanov 
12055eca1c10SIngo Molnar static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
12067af57294SPavel Emelyanov {
120752ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
12087af57294SPavel Emelyanov }
12097af57294SPavel Emelyanov 
12107af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
12117af57294SPavel Emelyanov {
121252ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
12137af57294SPavel Emelyanov }
12147af57294SPavel Emelyanov 
1215dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1216dd1c1f2fSOleg Nesterov {
1217dd1c1f2fSOleg Nesterov 	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
1218dd1c1f2fSOleg Nesterov }
1219dd1c1f2fSOleg Nesterov 
1220dd1c1f2fSOleg Nesterov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1221dd1c1f2fSOleg Nesterov {
1222dd1c1f2fSOleg Nesterov 	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
1223dd1c1f2fSOleg Nesterov }
1224dd1c1f2fSOleg Nesterov 
1225dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1226dd1c1f2fSOleg Nesterov {
1227dd1c1f2fSOleg Nesterov 	pid_t pid = 0;
1228dd1c1f2fSOleg Nesterov 
1229dd1c1f2fSOleg Nesterov 	rcu_read_lock();
1230dd1c1f2fSOleg Nesterov 	if (pid_alive(tsk))
1231dd1c1f2fSOleg Nesterov 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1232dd1c1f2fSOleg Nesterov 	rcu_read_unlock();
1233dd1c1f2fSOleg Nesterov 
1234dd1c1f2fSOleg Nesterov 	return pid;
1235dd1c1f2fSOleg Nesterov }
1236dd1c1f2fSOleg Nesterov 
1237dd1c1f2fSOleg Nesterov static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1238dd1c1f2fSOleg Nesterov {
1239dd1c1f2fSOleg Nesterov 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1240dd1c1f2fSOleg Nesterov }
1241dd1c1f2fSOleg Nesterov 
12425eca1c10SIngo Molnar /* Obsolete, do not use: */
12431b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
12441b0f7ffdSOleg Nesterov {
12451b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
12461b0f7ffdSOleg Nesterov }
12477af57294SPavel Emelyanov 
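/*
 * Illustrative sketch (not part of the original header) of the naming
 * scheme documented above: the same task's pid as seen globally, from
 * current's pid namespace, and from an explicit namespace.  The helper
 * name and the (void) casts are purely for illustration.
 */
static inline void example_pid_views(struct task_struct *tsk,
				     struct pid_namespace *ns)
{
	pid_t global_id  = task_pid_nr(tsk);	/* id in the init namespace */
	pid_t virtual_id = task_pid_vnr(tsk);	/* id as seen by current */
	pid_t ns_id      = task_pid_nr_ns(tsk, ns); /* id as seen from @ns */

	(void)global_id; (void)virtual_id; (void)ns_id;
}
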
124806eb6184SPeter Zijlstra #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
124906eb6184SPeter Zijlstra #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
125006eb6184SPeter Zijlstra 
1251*1d48b080SPeter Zijlstra static inline unsigned int task_state_index(struct task_struct *tsk)
125220435d84SXie XiuQi {
12531593baabSPeter Zijlstra 	unsigned int tsk_state = READ_ONCE(tsk->state);
12541593baabSPeter Zijlstra 	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
125520435d84SXie XiuQi 
125606eb6184SPeter Zijlstra 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
125706eb6184SPeter Zijlstra 
125806eb6184SPeter Zijlstra 	if (tsk_state == TASK_IDLE)
125906eb6184SPeter Zijlstra 		state = TASK_REPORT_IDLE;
126006eb6184SPeter Zijlstra 
12611593baabSPeter Zijlstra 	return fls(state);
12621593baabSPeter Zijlstra }
126320435d84SXie XiuQi 
1264*1d48b080SPeter Zijlstra static inline char task_index_to_char(unsigned int state)
12651593baabSPeter Zijlstra {
12668ef9925bSPeter Zijlstra 	static const char state_char[] = "RSDTtXZPI";
12671593baabSPeter Zijlstra 
126806eb6184SPeter Zijlstra 	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
12691593baabSPeter Zijlstra 
12701593baabSPeter Zijlstra 	return state_char[state];
12711593baabSPeter Zijlstra }
12721593baabSPeter Zijlstra 
12731593baabSPeter Zijlstra static inline char task_state_to_char(struct task_struct *tsk)
12741593baabSPeter Zijlstra {
1275*1d48b080SPeter Zijlstra 	return task_index_to_char(task_state_index(tsk));
127620435d84SXie XiuQi }
127720435d84SXie XiuQi 
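/*
 * Worked example (not part of the original header): how the helpers above
 * map a task state onto the single letter reported in /proc.  A task
 * sleeping in TASK_INTERRUPTIBLE (0x0001) gives task_state_index() ==
 * fls(0x0001) == 1, and task_index_to_char(1) picks 'S' from "RSDTtXZPI";
 * a TASK_RUNNING task gives index 0 and therefore 'R'.
 */
static inline char example_state_letter(struct task_struct *tsk)
{
	/* Equivalent to task_state_to_char(tsk), spelled out in two steps: */
	unsigned int idx = task_state_index(tsk);

	return task_index_to_char(idx);
}
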
12781da177e4SLinus Torvalds /**
1279570f5241SSergey Senozhatsky  * is_global_init - check if a task structure is the init task. Since init
1280570f5241SSergey Senozhatsky  * is free to have sub-threads, we need to check its tgid.
12813260259fSHenne  * @tsk: Task structure to be checked.
12823260259fSHenne  *
12833260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1284e69f6186SYacine Belkadi  *
1285e69f6186SYacine Belkadi  * Return: 1 if the task structure is init. 0 otherwise.
1286f400e198SSukadev Bhattiprolu  */
1287e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1288b461cc03SPavel Emelyanov {
1289570f5241SSergey Senozhatsky 	return task_tgid_nr(tsk) == 1;
1290b461cc03SPavel Emelyanov }
1291b460cbc5SSerge E. Hallyn 
12929ec52099SCedric Le Goater extern struct pid *cad_pid;
12939ec52099SCedric Le Goater 
12941da177e4SLinus Torvalds /*
12951da177e4SLinus Torvalds  * Per process flags
12961da177e4SLinus Torvalds  */
1297c1de45caSPeter Zijlstra #define PF_IDLE			0x00000002	/* I am an IDLE thread */
12985eca1c10SIngo Molnar #define PF_EXITING		0x00000004	/* Getting shut down */
12995eca1c10SIngo Molnar #define PF_EXITPIDONE		0x00000008	/* PI exit done on shut down */
130094886b84SLaurent Vivier #define PF_VCPU			0x00000010	/* I'm a virtual CPU */
130121aa9af0STejun Heo #define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
13025eca1c10SIngo Molnar #define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
13035eca1c10SIngo Molnar #define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
13045eca1c10SIngo Molnar #define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
13055eca1c10SIngo Molnar #define PF_DUMPCORE		0x00000200	/* Dumped core */
13065eca1c10SIngo Molnar #define PF_SIGNALED		0x00000400	/* Killed by a signal */
13071da177e4SLinus Torvalds #define PF_MEMALLOC		0x00000800	/* Allocating memory */
13085eca1c10SIngo Molnar #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
13095eca1c10SIngo Molnar #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
13105eca1c10SIngo Molnar #define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
13115eca1c10SIngo Molnar #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
13125eca1c10SIngo Molnar #define PF_FROZEN		0x00010000	/* Frozen for system suspend */
13137dea19f9SMichal Hocko #define PF_KSWAPD		0x00020000	/* I am kswapd */
13147dea19f9SMichal Hocko #define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
13157dea19f9SMichal Hocko #define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
13161da177e4SLinus Torvalds #define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
1317246bb0b1SOleg Nesterov #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
13185eca1c10SIngo Molnar #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
1319b31dc66aSJens Axboe #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
132014a40ffcSTejun Heo #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
13214db96cf0SAndi Kleen #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
132261a87122SThomas Gleixner #define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
132358a69cb4STejun Heo #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
13245eca1c10SIngo Molnar #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
13251da177e4SLinus Torvalds 
13261da177e4SLinus Torvalds /*
13271da177e4SLinus Torvalds  * Only the _current_ task can read/write tsk->flags; other tasks
13281da177e4SLinus Torvalds  * may only read tsk->flags, for example via tsk_used_math()
13291da177e4SLinus Torvalds  * (as during threaded core dumping).
13301da177e4SLinus Torvalds  * There is, however, an exception to this rule during ptrace
13311da177e4SLinus Torvalds  * and fork: the ptracer task is allowed to write to the
13321da177e4SLinus Torvalds  * child->flags of its traced child (and likewise, during fork, the
13331da177e4SLinus Torvalds  * parent can write to the child->flags), because we are guaranteed the
13341da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
13351da177e4SLinus Torvalds  * at the same time the parent does it.
13361da177e4SLinus Torvalds  */
13371da177e4SLinus Torvalds #define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
13381da177e4SLinus Torvalds #define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
13391da177e4SLinus Torvalds #define clear_used_math()			clear_stopped_child_used_math(current)
13401da177e4SLinus Torvalds #define set_used_math()				set_stopped_child_used_math(current)
13415eca1c10SIngo Molnar 
13421da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
13431da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
13445eca1c10SIngo Molnar 
13455eca1c10SIngo Molnar #define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)
13465eca1c10SIngo Molnar 
13471da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
13481da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
13495eca1c10SIngo Molnar 
13501da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
13511da177e4SLinus Torvalds #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
13521da177e4SLinus Torvalds #define used_math()				tsk_used_math(current)
13531da177e4SLinus Torvalds 
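/*
 * Illustrative sketch (not part of the original header) of the access rule
 * described in the comment above: another task's flags are only ever read
 * (e.g. via tsk_used_math()), while writes go through current's own flags
 * (e.g. via set_used_math()).  The helper name is hypothetical.
 */
static inline bool example_flags_access(struct task_struct *other)
{
	/* Read-only access to another task's flags is allowed: */
	bool other_uses_fpu = !!tsk_used_math(other);

	/* Writes are confined to current (outside the ptrace/fork exceptions): */
	set_used_math();

	return other_uses_fpu;
}
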
135462ec05ddSThomas Gleixner static inline bool is_percpu_thread(void)
135562ec05ddSThomas Gleixner {
135662ec05ddSThomas Gleixner #ifdef CONFIG_SMP
135762ec05ddSThomas Gleixner 	return (current->flags & PF_NO_SETAFFINITY) &&
135862ec05ddSThomas Gleixner 		(current->nr_cpus_allowed  == 1);
135962ec05ddSThomas Gleixner #else
136062ec05ddSThomas Gleixner 	return true;
136162ec05ddSThomas Gleixner #endif
136262ec05ddSThomas Gleixner }
136362ec05ddSThomas Gleixner 
13641d4457f9SKees Cook /* Per-process atomic flags. */
1365a2b86f77SZefan Li #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
13662ad654bcSZefan Li #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
13672ad654bcSZefan Li #define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
13681d4457f9SKees Cook 
13691d4457f9SKees Cook 
1370e0e5070bSZefan Li #define TASK_PFA_TEST(name, func)					\
1371e0e5070bSZefan Li 	static inline bool task_##func(struct task_struct *p)		\
1372e0e5070bSZefan Li 	{ return test_bit(PFA_##name, &p->atomic_flags); }
13735eca1c10SIngo Molnar 
1374e0e5070bSZefan Li #define TASK_PFA_SET(name, func)					\
1375e0e5070bSZefan Li 	static inline void task_set_##func(struct task_struct *p)	\
1376e0e5070bSZefan Li 	{ set_bit(PFA_##name, &p->atomic_flags); }
13775eca1c10SIngo Molnar 
1378e0e5070bSZefan Li #define TASK_PFA_CLEAR(name, func)					\
1379e0e5070bSZefan Li 	static inline void task_clear_##func(struct task_struct *p)	\
1380e0e5070bSZefan Li 	{ clear_bit(PFA_##name, &p->atomic_flags); }
13811d4457f9SKees Cook 
1382e0e5070bSZefan Li TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1383e0e5070bSZefan Li TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
13841d4457f9SKees Cook 
13852ad654bcSZefan Li TASK_PFA_TEST(SPREAD_PAGE, spread_page)
13862ad654bcSZefan Li TASK_PFA_SET(SPREAD_PAGE, spread_page)
13872ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
13882ad654bcSZefan Li 
13892ad654bcSZefan Li TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
13902ad654bcSZefan Li TASK_PFA_SET(SPREAD_SLAB, spread_slab)
13912ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1392544b2c91STejun Heo 
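/*
 * Example (not part of the original header): the TASK_PFA_* macros above
 * generate trivial accessors for the per-process atomic flags; e.g.
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) expands to task_no_new_privs(p),
 * which tests PFA_NO_NEW_PRIVS in p->atomic_flags.  The helper below is a
 * hypothetical caller.
 */
static inline bool example_pfa_usage(struct task_struct *p)
{
	/* Set once and never cleared: note there is no TASK_PFA_CLEAR for it. */
	return task_no_new_privs(p);
}
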
13935eca1c10SIngo Molnar static inline void
1394717a94b5SNeilBrown current_restore_flags(unsigned long orig_flags, unsigned long flags)
1395907aed48SMel Gorman {
1396717a94b5SNeilBrown 	current->flags &= ~flags;
1397717a94b5SNeilBrown 	current->flags |= orig_flags & flags;
1398907aed48SMel Gorman }
1399907aed48SMel Gorman 
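/*
 * Usage sketch (not part of the original header) for current_restore_flags():
 * temporarily set a PF_ flag around a block of work and then restore the
 * caller's original value, so that nested callers are not disturbed.  The
 * commented-out do_some_work() is a hypothetical stand-in.
 */
static inline void example_noio_section(void)
{
	unsigned long old_flags = current->flags;

	current->flags |= PF_MEMALLOC_NOIO;	/* allocations must not recurse into I/O */
	/* do_some_work(); */
	current_restore_flags(old_flags, PF_MEMALLOC_NOIO);
}
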
14005eca1c10SIngo Molnar extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
14015eca1c10SIngo Molnar extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
14021da177e4SLinus Torvalds #ifdef CONFIG_SMP
14035eca1c10SIngo Molnar extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
14045eca1c10SIngo Molnar extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
14051da177e4SLinus Torvalds #else
14065eca1c10SIngo Molnar static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
14071e1b6c51SKOSAKI Motohiro {
14081e1b6c51SKOSAKI Motohiro }
14095eca1c10SIngo Molnar static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
14101da177e4SLinus Torvalds {
141196f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
14121da177e4SLinus Torvalds 		return -EINVAL;
14131da177e4SLinus Torvalds 	return 0;
14141da177e4SLinus Torvalds }
14151da177e4SLinus Torvalds #endif
1416e0ad9556SRusty Russell 
14176d0d2878SChristian Borntraeger #ifndef cpu_relax_yield
14186d0d2878SChristian Borntraeger #define cpu_relax_yield() cpu_relax()
14196d0d2878SChristian Borntraeger #endif
14206d0d2878SChristian Borntraeger 
1421fa93384fSDan Carpenter extern int yield_to(struct task_struct *p, bool preempt);
142236c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
142336c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
14245eca1c10SIngo Molnar 
1425d0ea0268SDongsheng Yang /**
1426d0ea0268SDongsheng Yang  * task_nice - return the nice value of a given task.
1427d0ea0268SDongsheng Yang  * @p: the task in question.
1428d0ea0268SDongsheng Yang  *
1429d0ea0268SDongsheng Yang  * Return: The nice value [ -20 ... 0 ... 19 ].
1430d0ea0268SDongsheng Yang  */
1431d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p)
1432d0ea0268SDongsheng Yang {
1433d0ea0268SDongsheng Yang 	return PRIO_TO_NICE((p)->static_prio);
1434d0ea0268SDongsheng Yang }
14355eca1c10SIngo Molnar 
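/*
 * Worked example (not part of the original header), assuming the usual
 * DEFAULT_PRIO of 120 from <linux/sched/prio.h>: PRIO_TO_NICE() maps
 * static_prio 100..139 onto the nice range -20..19, so a task left at the
 * default static_prio of 120 reports task_nice() == 0.
 */
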
143636c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
143736c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
14381da177e4SLinus Torvalds extern int idle_cpu(int cpu);
14395eca1c10SIngo Molnar extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
14405eca1c10SIngo Molnar extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
14415eca1c10SIngo Molnar extern int sched_setattr(struct task_struct *, const struct sched_attr *);
144236c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
14435eca1c10SIngo Molnar 
1444c4f30608SPaul E. McKenney /**
1445c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
1446fa757281SRandy Dunlap  * @p: the task in question.
1447e69f6186SYacine Belkadi  *
1448e69f6186SYacine Belkadi  * Return: 1 if @p is an idle task. 0 otherwise.
1449c4f30608SPaul E. McKenney  */
14507061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
1451c4f30608SPaul E. McKenney {
1452c1de45caSPeter Zijlstra 	return !!(p->flags & PF_IDLE);
1453c4f30608SPaul E. McKenney }
14545eca1c10SIngo Molnar 
145536c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
1456a458ae2eSPeter Zijlstra extern void ia64_set_curr_task(int cpu, struct task_struct *p);
14571da177e4SLinus Torvalds 
14581da177e4SLinus Torvalds void yield(void);
14591da177e4SLinus Torvalds 
14601da177e4SLinus Torvalds union thread_union {
1461c65eacbeSAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK
14621da177e4SLinus Torvalds 	struct thread_info thread_info;
1463c65eacbeSAndy Lutomirski #endif
14641da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
14651da177e4SLinus Torvalds };
14661da177e4SLinus Torvalds 
1467f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
1468f3ac6067SIngo Molnar static inline struct thread_info *task_thread_info(struct task_struct *task)
1469f3ac6067SIngo Molnar {
1470f3ac6067SIngo Molnar 	return &task->thread_info;
1471f3ac6067SIngo Molnar }
1472f3ac6067SIngo Molnar #elif !defined(__HAVE_THREAD_FUNCTIONS)
1473f3ac6067SIngo Molnar # define task_thread_info(task)	((struct thread_info *)(task)->stack)
1474f3ac6067SIngo Molnar #endif
1475f3ac6067SIngo Molnar 
1476198fe21bSPavel Emelyanov /*
1477198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
1478198fe21bSPavel Emelyanov  *
1479198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
1480198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
1481228ebcbeSPavel Emelyanov  * find_task_by_vpid():
1482228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
1483198fe21bSPavel Emelyanov  *
1484e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
1485198fe21bSPavel Emelyanov  */
1486198fe21bSPavel Emelyanov 
1487228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
14885eca1c10SIngo Molnar extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1489198fe21bSPavel Emelyanov 
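/*
 * Usage sketch (not part of the original header): looking up a task by its
 * virtual pid.  Callers are expected to hold rcu_read_lock() (or the
 * tasklist lock) across the lookup and any use of the returned pointer;
 * the helper name is hypothetical.
 */
static inline bool example_task_exists(pid_t vnr)
{
	struct task_struct *p;
	bool found;

	rcu_read_lock();
	p = find_task_by_vpid(vnr);
	found = (p != NULL);
	rcu_read_unlock();

	return found;
}
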
1490b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1491b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
14923e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
14935eca1c10SIngo Molnar 
14941da177e4SLinus Torvalds #ifdef CONFIG_SMP
14951da177e4SLinus Torvalds extern void kick_process(struct task_struct *tsk);
14961da177e4SLinus Torvalds #else
14971da177e4SLinus Torvalds static inline void kick_process(struct task_struct *tsk) { }
14981da177e4SLinus Torvalds #endif
14991da177e4SLinus Torvalds 
150082b89778SAdrian Hunter extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
15015eca1c10SIngo Molnar 
150282b89778SAdrian Hunter static inline void set_task_comm(struct task_struct *tsk, const char *from)
150382b89778SAdrian Hunter {
150482b89778SAdrian Hunter 	__set_task_comm(tsk, from, false);
150582b89778SAdrian Hunter }
15065eca1c10SIngo Molnar 
150759714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
15081da177e4SLinus Torvalds 
15091da177e4SLinus Torvalds #ifdef CONFIG_SMP
1510317f3941SPeter Zijlstra void scheduler_ipi(void);
151185ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
15121da177e4SLinus Torvalds #else
1513184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
15145eca1c10SIngo Molnar static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
151585ba2d86SRoland McGrath {
151685ba2d86SRoland McGrath 	return 1;
151785ba2d86SRoland McGrath }
15181da177e4SLinus Torvalds #endif
15191da177e4SLinus Torvalds 
15205eca1c10SIngo Molnar /*
15215eca1c10SIngo Molnar  * Set thread flags in another task's structures.
15225eca1c10SIngo Molnar  * See asm/thread_info.h for the available TIF_xxxx flags:
15231da177e4SLinus Torvalds  */
15241da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
15251da177e4SLinus Torvalds {
1526a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
15271da177e4SLinus Torvalds }
15281da177e4SLinus Torvalds 
15291da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
15301da177e4SLinus Torvalds {
1531a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
15321da177e4SLinus Torvalds }
15331da177e4SLinus Torvalds 
15341da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
15351da177e4SLinus Torvalds {
1536a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
15371da177e4SLinus Torvalds }
15381da177e4SLinus Torvalds 
15391da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
15401da177e4SLinus Torvalds {
1541a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
15421da177e4SLinus Torvalds }
15431da177e4SLinus Torvalds 
15441da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
15451da177e4SLinus Torvalds {
1546a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
15471da177e4SLinus Torvalds }
15481da177e4SLinus Torvalds 
15491da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
15501da177e4SLinus Torvalds {
15511da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
15521da177e4SLinus Torvalds }
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
15551da177e4SLinus Torvalds {
15561da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
15571da177e4SLinus Torvalds }
15581da177e4SLinus Torvalds 
15598ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
15608ae121acSGregory Haskins {
15618ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
15628ae121acSGregory Haskins }
15638ae121acSGregory Haskins 
15641da177e4SLinus Torvalds /*
15651da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
15661da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
15671da177e4SLinus Torvalds  * value indicates whether a reschedule actually happened.
15681da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling;
15691da177e4SLinus Torvalds  * cond_resched_softirq() will re-enable bottom halves before scheduling.
15701da177e4SLinus Torvalds  */
157135a773a0SPeter Zijlstra #ifndef CONFIG_PREEMPT
1572c3921ab7SLinus Torvalds extern int _cond_resched(void);
157335a773a0SPeter Zijlstra #else
157435a773a0SPeter Zijlstra static inline int _cond_resched(void) { return 0; }
157535a773a0SPeter Zijlstra #endif
15766f80bd98SFrederic Weisbecker 
1577613afbf8SFrederic Weisbecker #define cond_resched() ({			\
15783427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, 0);	\
1579613afbf8SFrederic Weisbecker 	_cond_resched();			\
1580613afbf8SFrederic Weisbecker })
15816f80bd98SFrederic Weisbecker 
1582613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
1583613afbf8SFrederic Weisbecker 
1584613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
15853427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1586613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
1587613afbf8SFrederic Weisbecker })
1588613afbf8SFrederic Weisbecker 
1589613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
1590613afbf8SFrederic Weisbecker 
1591613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
15923427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
1593613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
1594613afbf8SFrederic Weisbecker })
15951da177e4SLinus Torvalds 
1596f6f3c437SSimon Horman static inline void cond_resched_rcu(void)
1597f6f3c437SSimon Horman {
1598f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1599f6f3c437SSimon Horman 	rcu_read_unlock();
1600f6f3c437SSimon Horman 	cond_resched();
1601f6f3c437SSimon Horman 	rcu_read_lock();
1602f6f3c437SSimon Horman #endif
1603f6f3c437SSimon Horman }
1604f6f3c437SSimon Horman 
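/*
 * Usage sketch (not part of the original header): breaking up a long,
 * preemption-unfriendly loop with cond_resched().  The item count and the
 * commented-out process_one() are purely illustrative.
 */
static inline void example_long_loop(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		/* process_one(i); */
		cond_resched();		/* may schedule if a resched is pending */
	}
}
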
16051da177e4SLinus Torvalds /*
16061da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
160795c354feSNick Piggin  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
160895c354feSNick Piggin  * but reflects a general need for low latency.)
16091da177e4SLinus Torvalds  */
161095c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
16111da177e4SLinus Torvalds {
161295c354feSNick Piggin #ifdef CONFIG_PREEMPT
161395c354feSNick Piggin 	return spin_is_contended(lock);
161495c354feSNick Piggin #else
16151da177e4SLinus Torvalds 	return 0;
161695c354feSNick Piggin #endif
16171da177e4SLinus Torvalds }
16181da177e4SLinus Torvalds 
161975f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
162075f93fedSPeter Zijlstra {
162175f93fedSPeter Zijlstra 	return unlikely(tif_need_resched());
162275f93fedSPeter Zijlstra }
162375f93fedSPeter Zijlstra 
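/*
 * Usage sketch (not part of the original header): the classic lock-break
 * pattern that spin_needbreak() supports.  Assumes <linux/spinlock.h> is
 * visible in the including file; my_lock and nr_chunks are hypothetical.
 */
static inline void example_lock_break(spinlock_t *my_lock, int nr_chunks)
{
	int i;

	spin_lock(my_lock);
	for (i = 0; i < nr_chunks; i++) {
		/* ... do one bounded chunk of work under the lock ... */
		if (need_resched() || spin_needbreak(my_lock)) {
			spin_unlock(my_lock);
			cond_resched();
			spin_lock(my_lock);
		}
	}
	spin_unlock(my_lock);
}
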
1624ee761f62SThomas Gleixner /*
16251da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
16261da177e4SLinus Torvalds  */
16271da177e4SLinus Torvalds #ifdef CONFIG_SMP
16281da177e4SLinus Torvalds 
16291da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
16301da177e4SLinus Torvalds {
1631c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
1632c65eacbeSAndy Lutomirski 	return p->cpu;
1633c65eacbeSAndy Lutomirski #else
1634a1261f54SAl Viro 	return task_thread_info(p)->cpu;
1635c65eacbeSAndy Lutomirski #endif
16361da177e4SLinus Torvalds }
16371da177e4SLinus Torvalds 
1638c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
16391da177e4SLinus Torvalds 
16401da177e4SLinus Torvalds #else
16411da177e4SLinus Torvalds 
16421da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
16431da177e4SLinus Torvalds {
16441da177e4SLinus Torvalds 	return 0;
16451da177e4SLinus Torvalds }
16461da177e4SLinus Torvalds 
16471da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
16481da177e4SLinus Torvalds {
16491da177e4SLinus Torvalds }
16501da177e4SLinus Torvalds 
16511da177e4SLinus Torvalds #endif /* CONFIG_SMP */
16521da177e4SLinus Torvalds 
1653d9345c65SPan Xinhui /*
1654d9345c65SPan Xinhui  * In order to reduce various lock holder preemption latencies, provide an
1655d9345c65SPan Xinhui  * interface to see whether a vCPU is currently running.
1656d9345c65SPan Xinhui  *
1657d9345c65SPan Xinhui  * This allows us to terminate optimistic spin loops and block, analogous to
1658d9345c65SPan Xinhui  * the native optimistic spin heuristic of testing if the lock owner task is
1659d9345c65SPan Xinhui  * running or not.
1660d9345c65SPan Xinhui  */
1661d9345c65SPan Xinhui #ifndef vcpu_is_preempted
1662d9345c65SPan Xinhui # define vcpu_is_preempted(cpu)	false
1663d9345c65SPan Xinhui #endif
1664d9345c65SPan Xinhui 
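/*
 * Usage sketch (not part of the original header): an optimistic spin loop
 * of the kind described above, which stops spinning once the lock owner is
 * no longer running or its vCPU has been preempted.  Assumes CONFIG_SMP
 * (where task_struct has an ->on_cpu field) and cpu_relax() from the arch
 * headers; the helper name is hypothetical.
 */
static inline bool example_optimistic_spin(struct task_struct *owner)
{
	while (READ_ONCE(owner->on_cpu)) {
		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
			return false;	/* stop spinning and block instead */
		cpu_relax();
	}
	return true;	/* owner is off the CPU; stop spinning and try the lock */
}
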
166596f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
166696f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
16675c45bf27SSiddha, Suresh B 
166882455257SDave Hansen #ifndef TASK_SIZE_OF
166982455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
167082455257SDave Hansen #endif
167182455257SDave Hansen 
16721da177e4SLinus Torvalds #endif
1673