xref: /linux/include/linux/sched.h (revision 70b8157e61d0143fb44ae9482557d7aca365da3d)
11da177e4SLinus Torvalds #ifndef _LINUX_SCHED_H
21da177e4SLinus Torvalds #define _LINUX_SCHED_H
31da177e4SLinus Torvalds 
4607ca46eSDavid Howells #include <uapi/linux/sched.h>
5b7b3c76aSDavid Woodhouse 
65c228079SDongsheng Yang #include <linux/sched/prio.h>
75c228079SDongsheng Yang 
81da177e4SLinus Torvalds #include <linux/capability.h>
9b69339baSIngo Molnar #include <linux/mutex.h>
10fb00aca4SPeter Zijlstra #include <linux/plist.h>
11c92ff1bdSMartin Schwidefsky #include <linux/mm_types.h>
121da177e4SLinus Torvalds #include <asm/ptrace.h>
131da177e4SLinus Torvalds 
141da177e4SLinus Torvalds #include <linux/sem.h>
15ab602f79SJack Miller #include <linux/shm.h>
161da177e4SLinus Torvalds #include <linux/signal.h>
17f361bf4aSIngo Molnar #include <linux/signal_types.h>
181da177e4SLinus Torvalds #include <linux/pid.h>
191da177e4SLinus Torvalds #include <linux/seccomp.h>
2005725f7eSJiri Pirko #include <linux/rculist.h>
2123f78d4aSIngo Molnar #include <linux/rtmutex.h>
221da177e4SLinus Torvalds 
23a3b6714eSDavid Woodhouse #include <linux/resource.h>
24a3b6714eSDavid Woodhouse #include <linux/hrtimer.h>
255c9a8750SDmitry Vyukov #include <linux/kcov.h>
267c3ab738SAndrew Morton #include <linux/task_io_accounting.h>
279745512cSArjan van de Ven #include <linux/latencytop.h>
289e2b2dc4SDavid Howells #include <linux/cred.h>
2921caf2fcSMing Lei #include <linux/gfp.h>
30fd771233SIngo Molnar #include <linux/topology.h>
31d4311ff1SAaron Tomlin #include <linux/magic.h>
327d7efec3STejun Heo #include <linux/cgroup-defs.h>
33a3b6714eSDavid Woodhouse 
34*70b8157eSIngo Molnar #include <asm/current.h>
35*70b8157eSIngo Molnar 
36e2d1e2aeSIngo Molnar struct sched_attr;
37e2d1e2aeSIngo Molnar struct sched_param;
38d50dde5aSDario Faggioli 
39c87e2837SIngo Molnar struct futex_pi_state;
40286100a6SAlexey Dobriyan struct robust_list_head;
41bddd87c7SAkinobu Mita struct bio_list;
425ad4e53bSAl Viro struct fs_struct;
43cdd6c482SIngo Molnar struct perf_event_context;
4473c10101SJens Axboe struct blk_plug;
45c4ad8f98SLinus Torvalds struct filename;
4689076bc3SAl Viro struct nameidata;
471da177e4SLinus Torvalds 
48c3edc401SIngo Molnar struct signal_struct;
49c3edc401SIngo Molnar struct sighand_struct;
50c3edc401SIngo Molnar 
5143ae34cbSIngo Molnar struct seq_file;
5243ae34cbSIngo Molnar struct cfs_rq;
534cf86d77SIngo Molnar struct task_group;
541da177e4SLinus Torvalds 
554a8342d2SLinus Torvalds /*
564a8342d2SLinus Torvalds  * Task state bitmask. NOTE! These bits are also
574a8342d2SLinus Torvalds  * encoded in fs/proc/array.c: get_task_state().
584a8342d2SLinus Torvalds  *
594a8342d2SLinus Torvalds  * We have two separate sets of flags: task->state
604a8342d2SLinus Torvalds  * is about runnability, while task->exit_state is
614a8342d2SLinus Torvalds  * about the task exiting. Confusing, but this way
624a8342d2SLinus Torvalds  * modifying one set can't modify the other one by
634a8342d2SLinus Torvalds  * mistake.
644a8342d2SLinus Torvalds  */
651da177e4SLinus Torvalds #define TASK_RUNNING		0
661da177e4SLinus Torvalds #define TASK_INTERRUPTIBLE	1
671da177e4SLinus Torvalds #define TASK_UNINTERRUPTIBLE	2
68f021a3c2SMatthew Wilcox #define __TASK_STOPPED		4
69f021a3c2SMatthew Wilcox #define __TASK_TRACED		8
704a8342d2SLinus Torvalds /* in tsk->exit_state */
71ad86622bSOleg Nesterov #define EXIT_DEAD		16
72ad86622bSOleg Nesterov #define EXIT_ZOMBIE		32
73abd50b39SOleg Nesterov #define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
744a8342d2SLinus Torvalds /* in tsk->state again */
75af927232SMike Galbraith #define TASK_DEAD		64
76f021a3c2SMatthew Wilcox #define TASK_WAKEKILL		128
77e9c84311SPeter Zijlstra #define TASK_WAKING		256
78f2530dc7SThomas Gleixner #define TASK_PARKED		512
7980ed87c8SPeter Zijlstra #define TASK_NOLOAD		1024
807dc603c9SPeter Zijlstra #define TASK_NEW		2048
817dc603c9SPeter Zijlstra #define TASK_STATE_MAX		4096
82f021a3c2SMatthew Wilcox 
837dc603c9SPeter Zijlstra #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
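
/*
 * Each character above corresponds to one state bit, in definition order:
 * R (running), S (interruptible sleep), D (uninterruptible sleep),
 * T (stopped), t (traced), X (exit dead), Z (zombie), x (task dead),
 * K (wakekill), W (waking), P (parked), N (noload), n (new).
 */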
8473342151SPeter Zijlstra 
85642fa448SDavidlohr Bueso /* Convenience macros for the sake of set_current_state */
86f021a3c2SMatthew Wilcox #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
87f021a3c2SMatthew Wilcox #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
88f021a3c2SMatthew Wilcox #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
891da177e4SLinus Torvalds 
9080ed87c8SPeter Zijlstra #define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
9180ed87c8SPeter Zijlstra 
9292a1f4bcSMatthew Wilcox /* Convenience macros for the sake of wake_up */
9392a1f4bcSMatthew Wilcox #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
94f021a3c2SMatthew Wilcox #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
9592a1f4bcSMatthew Wilcox 
9692a1f4bcSMatthew Wilcox /* get_task_state() */
9792a1f4bcSMatthew Wilcox #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
98f021a3c2SMatthew Wilcox 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
9974e37200SOleg Nesterov 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
10092a1f4bcSMatthew Wilcox 
101f021a3c2SMatthew Wilcox #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
102f021a3c2SMatthew Wilcox #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
10392a1f4bcSMatthew Wilcox #define task_is_stopped_or_traced(task)	\
104f021a3c2SMatthew Wilcox 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
10592a1f4bcSMatthew Wilcox #define task_contributes_to_load(task)	\
106e3c8ca83SNathan Lynch 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
10780ed87c8SPeter Zijlstra 				 (task->flags & PF_FROZEN) == 0 && \
10880ed87c8SPeter Zijlstra 				 (task->state & TASK_NOLOAD) == 0)
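
/*
 * Illustrative use of the helpers above (a sketch, not code from this
 * file): a kernel thread that must block without being woken by signals
 * and without counting towards the load average can use TASK_IDLE, which
 * task_contributes_to_load() excludes via the TASK_NOLOAD bit:
 *
 *	set_current_state(TASK_IDLE);
 *	schedule();
 */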
1091da177e4SLinus Torvalds 
1108eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1118eb23b9fSPeter Zijlstra 
1128eb23b9fSPeter Zijlstra #define __set_current_state(state_value)			\
1138eb23b9fSPeter Zijlstra 	do {							\
1148eb23b9fSPeter Zijlstra 		current->task_state_change = _THIS_IP_;		\
1158eb23b9fSPeter Zijlstra 		current->state = (state_value);			\
1168eb23b9fSPeter Zijlstra 	} while (0)
1178eb23b9fSPeter Zijlstra #define set_current_state(state_value)				\
1188eb23b9fSPeter Zijlstra 	do {							\
1198eb23b9fSPeter Zijlstra 		current->task_state_change = _THIS_IP_;		\
120b92b8b35SPeter Zijlstra 		smp_store_mb(current->state, (state_value));	\
1218eb23b9fSPeter Zijlstra 	} while (0)
1228eb23b9fSPeter Zijlstra 
1238eb23b9fSPeter Zijlstra #else
124498d0c57SAndrew Morton /*
125498d0c57SAndrew Morton  * set_current_state() includes a barrier so that the write of current->state
126498d0c57SAndrew Morton  * is correctly serialised wrt the caller's subsequent test of whether to
127498d0c57SAndrew Morton  * actually sleep:
128498d0c57SAndrew Morton  *
129a2250238SPeter Zijlstra  *   for (;;) {
130498d0c57SAndrew Morton  *	set_current_state(TASK_UNINTERRUPTIBLE);
131a2250238SPeter Zijlstra  *	if (!need_sleep)
132a2250238SPeter Zijlstra  *		break;
133498d0c57SAndrew Morton  *
134a2250238SPeter Zijlstra  *	schedule();
135a2250238SPeter Zijlstra  *   }
136a2250238SPeter Zijlstra  *   __set_current_state(TASK_RUNNING);
137a2250238SPeter Zijlstra  *
138a2250238SPeter Zijlstra  * If the caller does not need such serialisation (because, for instance, the
139a2250238SPeter Zijlstra  * condition test and condition change and wakeup are under the same lock) then
140a2250238SPeter Zijlstra  * use __set_current_state().
141a2250238SPeter Zijlstra  *
142a2250238SPeter Zijlstra  * The above is typically ordered against the wakeup, which does:
143a2250238SPeter Zijlstra  *
144a2250238SPeter Zijlstra  *	need_sleep = false;
145a2250238SPeter Zijlstra  *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
146a2250238SPeter Zijlstra  *
147a2250238SPeter Zijlstra  * Where wake_up_state() (and all other wakeup primitives) imply enough
148a2250238SPeter Zijlstra  * barriers to order the store of the variable against wakeup.
149a2250238SPeter Zijlstra  *
150a2250238SPeter Zijlstra  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
151a2250238SPeter Zijlstra  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
152a2250238SPeter Zijlstra  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
153a2250238SPeter Zijlstra  *
154a2250238SPeter Zijlstra  * This is obviously fine, since they both store the exact same value.
155a2250238SPeter Zijlstra  *
156a2250238SPeter Zijlstra  * Also see the comments of try_to_wake_up().
157498d0c57SAndrew Morton  */
1581da177e4SLinus Torvalds #define __set_current_state(state_value)		\
1591da177e4SLinus Torvalds 	do { current->state = (state_value); } while (0)
1601da177e4SLinus Torvalds #define set_current_state(state_value)			\
161b92b8b35SPeter Zijlstra 	smp_store_mb(current->state, (state_value))
1621da177e4SLinus Torvalds 
1638eb23b9fSPeter Zijlstra #endif
1648eb23b9fSPeter Zijlstra 
1651da177e4SLinus Torvalds /* Task command name length */
1661da177e4SLinus Torvalds #define TASK_COMM_LEN 16
1671da177e4SLinus Torvalds 
1681da177e4SLinus Torvalds #include <linux/spinlock.h>
1691da177e4SLinus Torvalds 
1701da177e4SLinus Torvalds /*
1711da177e4SLinus Torvalds  * This serializes "schedule()" and also protects
1721da177e4SLinus Torvalds  * the run-queue from deletions/modifications (but
1731da177e4SLinus Torvalds  * _adding_ to the beginning of the run-queue has
1741da177e4SLinus Torvalds  * a separate lock).
1751da177e4SLinus Torvalds  */
1761da177e4SLinus Torvalds extern rwlock_t tasklist_lock;
1771da177e4SLinus Torvalds extern spinlock_t mmlist_lock;
1781da177e4SLinus Torvalds 
17936c8b586SIngo Molnar struct task_struct;
1801da177e4SLinus Torvalds 
181db1466b3SPaul E. McKenney #ifdef CONFIG_PROVE_RCU
182db1466b3SPaul E. McKenney extern int lockdep_tasklist_lock_is_held(void);
183db1466b3SPaul E. McKenney #endif /* #ifdef CONFIG_PROVE_RCU */
184db1466b3SPaul E. McKenney 
1851da177e4SLinus Torvalds extern void sched_init(void);
1861da177e4SLinus Torvalds extern void sched_init_smp(void);
1872d07b255SHarvey Harrison extern asmlinkage void schedule_tail(struct task_struct *prev);
18836c8b586SIngo Molnar extern void init_idle(struct task_struct *idle, int cpu);
1891df21055SIngo Molnar extern void init_idle_bootup_task(struct task_struct *idle);
1901da177e4SLinus Torvalds 
1913fa0818bSRik van Riel extern cpumask_var_t cpu_isolated_map;
1923fa0818bSRik van Riel 
19389f19f04SAndrew Morton extern int runqueue_is_locked(int cpu);
194017730c1SIngo Molnar 
1951da177e4SLinus Torvalds extern void cpu_init(void);
1961da177e4SLinus Torvalds extern void trap_init(void);
1971da177e4SLinus Torvalds extern void update_process_times(int user);
1981da177e4SLinus Torvalds extern void scheduler_tick(void);
1999cf7243dSThomas Gleixner extern int sched_cpu_starting(unsigned int cpu);
20040190a78SThomas Gleixner extern int sched_cpu_activate(unsigned int cpu);
20140190a78SThomas Gleixner extern int sched_cpu_deactivate(unsigned int cpu);
2021da177e4SLinus Torvalds 
203f2785ddbSThomas Gleixner #ifdef CONFIG_HOTPLUG_CPU
204f2785ddbSThomas Gleixner extern int sched_cpu_dying(unsigned int cpu);
205f2785ddbSThomas Gleixner #else
206f2785ddbSThomas Gleixner # define sched_cpu_dying	NULL
207f2785ddbSThomas Gleixner #endif
2081da177e4SLinus Torvalds 
2091da177e4SLinus Torvalds #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
210b3c97528SHarvey Harrison extern signed long schedule_timeout(signed long timeout);
21164ed93a2SNishanth Aravamudan extern signed long schedule_timeout_interruptible(signed long timeout);
212294d5cc2SMatthew Wilcox extern signed long schedule_timeout_killable(signed long timeout);
21364ed93a2SNishanth Aravamudan extern signed long schedule_timeout_uninterruptible(signed long timeout);
21469b27bafSAndrew Morton extern signed long schedule_timeout_idle(signed long timeout);
2151da177e4SLinus Torvalds asmlinkage void schedule(void);
216c5491ea7SThomas Gleixner extern void schedule_preempt_disabled(void);
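
/*
 * Illustrative calling convention (a sketch, not code from this file):
 * plain schedule_timeout() expects the caller to set the task state
 * first, e.g. to sleep for up to one second unless woken earlier:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * The _interruptible/_killable/_uninterruptible/_idle variants set the
 * corresponding state themselves before sleeping.
 */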
2171da177e4SLinus Torvalds 
21810ab5643STejun Heo extern int __must_check io_schedule_prepare(void);
21910ab5643STejun Heo extern void io_schedule_finish(int token);
2209cff8adeSNeilBrown extern long io_schedule_timeout(long timeout);
22110ab5643STejun Heo extern void io_schedule(void);
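
/*
 * Illustrative sketch: io_schedule_prepare()/io_schedule_finish() bracket
 * an arbitrary blocking region so the time spent is accounted as I/O wait
 * (some_lock is a hypothetical example):
 *
 *	int token = io_schedule_prepare();
 *	mutex_lock(&some_lock);
 *	io_schedule_finish(token);
 *
 * io_schedule() and io_schedule_timeout() bundle this bracketing with a
 * plain schedule()/schedule_timeout().
 */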
2229cff8adeSNeilBrown 
2239af6528eSPeter Zijlstra void __noreturn do_task_dead(void);
2249af6528eSPeter Zijlstra 
225ab516013SSerge E. Hallyn struct nsproxy;
2261da177e4SLinus Torvalds 
227f06febc9SFrank Mayhar /**
2289d7fb042SPeter Zijlstra  * struct prev_cputime - snapshot of system and user cputime
229d37f761dSFrederic Weisbecker  * @utime: time spent in user mode
230d37f761dSFrederic Weisbecker  * @stime: time spent in system mode
2319d7fb042SPeter Zijlstra  * @lock: protects the above two fields
232d37f761dSFrederic Weisbecker  *
2339d7fb042SPeter Zijlstra  * Stores previous user/system time values such that we can guarantee
2349d7fb042SPeter Zijlstra  * monotonicity.
235d37f761dSFrederic Weisbecker  */
2369d7fb042SPeter Zijlstra struct prev_cputime {
2379d7fb042SPeter Zijlstra #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
2385613fda9SFrederic Weisbecker 	u64 utime;
2395613fda9SFrederic Weisbecker 	u64 stime;
2409d7fb042SPeter Zijlstra 	raw_spinlock_t lock;
2419d7fb042SPeter Zijlstra #endif
242d37f761dSFrederic Weisbecker };
243d37f761dSFrederic Weisbecker 
2449d7fb042SPeter Zijlstra static inline void prev_cputime_init(struct prev_cputime *prev)
2459d7fb042SPeter Zijlstra {
2469d7fb042SPeter Zijlstra #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
2479d7fb042SPeter Zijlstra 	prev->utime = prev->stime = 0;
2489d7fb042SPeter Zijlstra 	raw_spin_lock_init(&prev->lock);
2499d7fb042SPeter Zijlstra #endif
2509d7fb042SPeter Zijlstra }
2519d7fb042SPeter Zijlstra 
252d37f761dSFrederic Weisbecker /**
253f06febc9SFrank Mayhar  * struct task_cputime - collected CPU time counts
2545613fda9SFrederic Weisbecker  * @utime:		time spent in user mode, in nanoseconds
2555613fda9SFrederic Weisbecker  * @stime:		time spent in kernel mode, in nanoseconds
256f06febc9SFrank Mayhar  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
257f06febc9SFrank Mayhar  *
2589d7fb042SPeter Zijlstra  * This structure groups together three kinds of CPU time that are tracked for
2599d7fb042SPeter Zijlstra  * threads and thread groups.  Most things considering CPU time want to group
2609d7fb042SPeter Zijlstra  * these counts together and treat all three of them in parallel.
261f06febc9SFrank Mayhar  */
262f06febc9SFrank Mayhar struct task_cputime {
2635613fda9SFrederic Weisbecker 	u64 utime;
2645613fda9SFrederic Weisbecker 	u64 stime;
265f06febc9SFrank Mayhar 	unsigned long long sum_exec_runtime;
266f06febc9SFrank Mayhar };
2679d7fb042SPeter Zijlstra 
268f06febc9SFrank Mayhar /* Alternate field names when used to cache expirations. */
269f06febc9SFrank Mayhar #define virt_exp	utime
2709d7fb042SPeter Zijlstra #define prof_exp	stime
271f06febc9SFrank Mayhar #define sched_exp	sum_exec_runtime
272f06febc9SFrank Mayhar 
273971e8a98SJason Low /*
274971e8a98SJason Low  * This is the atomic variant of task_cputime, which can be used for
275971e8a98SJason Low  * storing and updating task_cputime statistics without locking.
276971e8a98SJason Low  */
277971e8a98SJason Low struct task_cputime_atomic {
278971e8a98SJason Low 	atomic64_t utime;
279971e8a98SJason Low 	atomic64_t stime;
280971e8a98SJason Low 	atomic64_t sum_exec_runtime;
281971e8a98SJason Low };
282971e8a98SJason Low 
283971e8a98SJason Low #define INIT_CPUTIME_ATOMIC \
284971e8a98SJason Low 	(struct task_cputime_atomic) {				\
285971e8a98SJason Low 		.utime = ATOMIC64_INIT(0),			\
286971e8a98SJason Low 		.stime = ATOMIC64_INIT(0),			\
287971e8a98SJason Low 		.sum_exec_runtime = ATOMIC64_INIT(0),		\
288971e8a98SJason Low 	}
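
/*
 * Illustrative usage (hypothetical names): the initializer above allows
 * declaration-time setup, after which the fields can be updated without
 * locking via the atomic64 primitives:
 *
 *	struct task_cputime_atomic ct = INIT_CPUTIME_ATOMIC;
 *	atomic64_add(delta, &ct.utime);
 */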
289971e8a98SJason Low 
290609ca066SPeter Zijlstra #define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
291a233f112SPeter Zijlstra 
292c99e6efeSPeter Zijlstra /*
29387dcbc06SPeter Zijlstra  * Disable preemption until the scheduler is running -- use an unconditional
29487dcbc06SPeter Zijlstra  * value so that it also works on !PREEMPT_COUNT kernels.
295d86ee480SPeter Zijlstra  *
29687dcbc06SPeter Zijlstra  * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
297c99e6efeSPeter Zijlstra  */
29887dcbc06SPeter Zijlstra #define INIT_PREEMPT_COUNT	PREEMPT_OFFSET
299c99e6efeSPeter Zijlstra 
300609ca066SPeter Zijlstra /*
301609ca066SPeter Zijlstra  * Initial preempt_count value; reflects the preempt_count schedule invariant
302609ca066SPeter Zijlstra  * which states that during context switches:
303609ca066SPeter Zijlstra  *
304609ca066SPeter Zijlstra  *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
305609ca066SPeter Zijlstra  *
306609ca066SPeter Zijlstra  * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
307609ca066SPeter Zijlstra  * Note: See finish_task_switch().
308609ca066SPeter Zijlstra  */
309609ca066SPeter Zijlstra #define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
3104cd4c1b4SPeter Zijlstra 
311f06febc9SFrank Mayhar /**
312f06febc9SFrank Mayhar  * struct thread_group_cputimer - thread group interval timer counts
313920ce39fSJason Low  * @cputime_atomic:	atomic thread group interval timers.
314d5c373ebSJason Low  * @running:		true when there are timers running and
315d5c373ebSJason Low  *			@cputime_atomic receives updates.
316c8d75aa4SJason Low  * @checking_timer:	true when a thread in the group is in the
317c8d75aa4SJason Low  *			process of checking for thread group timers.
318f06febc9SFrank Mayhar  *
319f06febc9SFrank Mayhar  * This structure contains the version of task_cputime, above, that is
3204cd4c1b4SPeter Zijlstra  * used for thread group CPU timer calculations.
321f06febc9SFrank Mayhar  */
3224cd4c1b4SPeter Zijlstra struct thread_group_cputimer {
32371107445SJason Low 	struct task_cputime_atomic cputime_atomic;
324d5c373ebSJason Low 	bool running;
325c8d75aa4SJason Low 	bool checking_timer;
326f06febc9SFrank Mayhar };
327f06febc9SFrank Mayhar 
3284714d1d3SBen Blum #include <linux/rwsem.h>
3295091faa4SMike Galbraith struct autogroup;
3305091faa4SMike Galbraith 
3311da177e4SLinus Torvalds struct backing_dev_info;
3321da177e4SLinus Torvalds struct reclaim_state;
3331da177e4SLinus Torvalds 
334f6db8347SNaveen N. Rao #ifdef CONFIG_SCHED_INFO
3351da177e4SLinus Torvalds struct sched_info {
3361da177e4SLinus Torvalds 	/* cumulative counters */
3372d72376bSIngo Molnar 	unsigned long pcount;	      /* # of times run on this cpu */
3389c2c4802SKen Chen 	unsigned long long run_delay; /* time spent waiting on a runqueue */
3391da177e4SLinus Torvalds 
3401da177e4SLinus Torvalds 	/* timestamps */
341172ba844SBalbir Singh 	unsigned long long last_arrival,/* when we last ran on a cpu */
3421da177e4SLinus Torvalds 			   last_queued;	/* when we were last queued to run */
3431da177e4SLinus Torvalds };
344f6db8347SNaveen N. Rao #endif /* CONFIG_SCHED_INFO */
3451da177e4SLinus Torvalds 
34647913d4eSIngo Molnar struct task_delay_info;
34752f17b6cSChandra Seetharaman 
34852f17b6cSChandra Seetharaman static inline int sched_info_on(void)
34952f17b6cSChandra Seetharaman {
35052f17b6cSChandra Seetharaman #ifdef CONFIG_SCHEDSTATS
35152f17b6cSChandra Seetharaman 	return 1;
35252f17b6cSChandra Seetharaman #elif defined(CONFIG_TASK_DELAY_ACCT)
35352f17b6cSChandra Seetharaman 	extern int delayacct_on;
35452f17b6cSChandra Seetharaman 	return delayacct_on;
35552f17b6cSChandra Seetharaman #else
35652f17b6cSChandra Seetharaman 	return 0;
357ca74e92bSShailabh Nagar #endif
35852f17b6cSChandra Seetharaman }
359ca74e92bSShailabh Nagar 
360cb251765SMel Gorman #ifdef CONFIG_SCHEDSTATS
361cb251765SMel Gorman void force_schedstat_enabled(void);
362cb251765SMel Gorman #endif
363cb251765SMel Gorman 
3641da177e4SLinus Torvalds /*
3656ecdd749SYuyang Du  * Integer metrics need fixed point arithmetic, e.g., sched/fair
3666ecdd749SYuyang Du  * has a few: load, load_avg, util_avg, freq, and capacity.
3676ecdd749SYuyang Du  *
3686ecdd749SYuyang Du  * We define a basic fixed point arithmetic range, and then formalize
3696ecdd749SYuyang Du  * all these metrics based on that basic range.
3706ecdd749SYuyang Du  */
3716ecdd749SYuyang Du # define SCHED_FIXEDPOINT_SHIFT	10
3726ecdd749SYuyang Du # define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
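
/*
 * With SCHED_FIXEDPOINT_SHIFT == 10, the value 1.0 is represented as
 * SCHED_FIXEDPOINT_SCALE == 1024; e.g. a ratio of 50% is stored as 512,
 * and a plain integer n converts into this format as
 * n << SCHED_FIXEDPOINT_SHIFT.
 */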
3736ecdd749SYuyang Du 
3741da177e4SLinus Torvalds struct io_context;			/* See blkdev.h */
3751da177e4SLinus Torvalds 
3761da177e4SLinus Torvalds 
377383f2835SChen, Kenneth W #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
37836c8b586SIngo Molnar extern void prefetch_stack(struct task_struct *t);
379383f2835SChen, Kenneth W #else
380383f2835SChen, Kenneth W static inline void prefetch_stack(struct task_struct *t) { }
381383f2835SChen, Kenneth W #endif
3821da177e4SLinus Torvalds 
3831da177e4SLinus Torvalds struct audit_context;		/* See audit.c */
3841da177e4SLinus Torvalds struct mempolicy;
385b92ce558SJens Axboe struct pipe_inode_info;
3864865ecf1SSerge E. Hallyn struct uts_namespace;
3871da177e4SLinus Torvalds 
38820b8a59fSIngo Molnar struct load_weight {
3899dbdb155SPeter Zijlstra 	unsigned long weight;
3909dbdb155SPeter Zijlstra 	u32 inv_weight;
39120b8a59fSIngo Molnar };
39220b8a59fSIngo Molnar 
3939d89c257SYuyang Du /*
3947b595334SYuyang Du  * The load_avg/util_avg accumulates an infinite geometric series
3957b595334SYuyang Du  * (see __update_load_avg() in kernel/sched/fair.c).
3967b595334SYuyang Du  *
3977b595334SYuyang Du  * [load_avg definition]
3987b595334SYuyang Du  *
3997b595334SYuyang Du  *   load_avg = runnable% * scale_load_down(load)
4007b595334SYuyang Du  *
4017b595334SYuyang Du  * where runnable% is the time ratio that a sched_entity is runnable.
4027b595334SYuyang Du  * For cfs_rq, it is the aggregated load_avg of all runnable and
4039d89c257SYuyang Du  * blocked sched_entities.
4047b595334SYuyang Du  *
4057b595334SYuyang Du  * load_avg may also take frequency scaling into account:
4067b595334SYuyang Du  *
4077b595334SYuyang Du  *   load_avg = runnable% * scale_load_down(load) * freq%
4087b595334SYuyang Du  *
4097b595334SYuyang Du  * where freq% is the CPU frequency normalized to the highest frequency.
4107b595334SYuyang Du  *
4117b595334SYuyang Du  * [util_avg definition]
4127b595334SYuyang Du  *
4137b595334SYuyang Du  *   util_avg = running% * SCHED_CAPACITY_SCALE
4147b595334SYuyang Du  *
4157b595334SYuyang Du  * where running% is the time ratio that a sched_entity is running on
4167b595334SYuyang Du  * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
4177b595334SYuyang Du  * and blocked sched_entities.
4187b595334SYuyang Du  *
4197b595334SYuyang Du  * util_avg may also factor frequency scaling and CPU capacity scaling:
4207b595334SYuyang Du  *
4217b595334SYuyang Du  *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
4227b595334SYuyang Du  *
4237b595334SYuyang Du  * where freq% is the same as above, and capacity% is the CPU capacity
4247b595334SYuyang Du  * normalized to the greatest capacity (due to uarch differences, etc).
4257b595334SYuyang Du  *
4267b595334SYuyang Du  * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
4277b595334SYuyang Du  * themselves are in the range of [0, 1]. To do fixed point arithmetic,
4287b595334SYuyang Du  * we therefore scale them to as large a range as necessary. This is for
4297b595334SYuyang Du  * example reflected by util_avg's SCHED_CAPACITY_SCALE.
4307b595334SYuyang Du  *
4317b595334SYuyang Du  * [Overflow issue]
4327b595334SYuyang Du  *
4337b595334SYuyang Du  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
4347b595334SYuyang Du  * with the highest load (=88761), always runnable on a single cfs_rq,
4357b595334SYuyang Du  * and should not overflow as the number already hits PID_MAX_LIMIT.
4367b595334SYuyang Du  *
4377b595334SYuyang Du  * For all other cases (including 32-bit kernels), struct load_weight's
4387b595334SYuyang Du  * weight will overflow first before we do, because:
4397b595334SYuyang Du  *
4407b595334SYuyang Du  *    Max(load_avg) <= Max(load.weight)
4417b595334SYuyang Du  *
4427b595334SYuyang Du  * Then it is the load_weight's responsibility to consider overflow
4437b595334SYuyang Du  * issues.
4449d89c257SYuyang Du  */
4459d85f21cSPaul Turner struct sched_avg {
4469d89c257SYuyang Du 	u64 last_update_time, load_sum;
4479d89c257SYuyang Du 	u32 util_sum, period_contrib;
4489d89c257SYuyang Du 	unsigned long load_avg, util_avg;
4499d85f21cSPaul Turner };
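
/*
 * Worked example of the load_avg definition above (illustrative, and
 * assuming scale_load_down(load) == 1024 for a nice-0 task): an entity
 * that is runnable 50% of the time at full frequency converges to
 * load_avg == 0.5 * 1024 == 512.
 */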
4509d85f21cSPaul Turner 
45194c18227SIngo Molnar #ifdef CONFIG_SCHEDSTATS
45241acab88SLucas De Marchi struct sched_statistics {
45394c18227SIngo Molnar 	u64			wait_start;
45494c18227SIngo Molnar 	u64			wait_max;
4556d082592SArjan van de Ven 	u64			wait_count;
4566d082592SArjan van de Ven 	u64			wait_sum;
4578f0dfc34SArjan van de Ven 	u64			iowait_count;
4588f0dfc34SArjan van de Ven 	u64			iowait_sum;
45994c18227SIngo Molnar 
46094c18227SIngo Molnar 	u64			sleep_start;
46120b8a59fSIngo Molnar 	u64			sleep_max;
46294c18227SIngo Molnar 	s64			sum_sleep_runtime;
46394c18227SIngo Molnar 
46494c18227SIngo Molnar 	u64			block_start;
46520b8a59fSIngo Molnar 	u64			block_max;
46620b8a59fSIngo Molnar 	u64			exec_max;
467eba1ed4bSIngo Molnar 	u64			slice_max;
468cc367732SIngo Molnar 
469cc367732SIngo Molnar 	u64			nr_migrations_cold;
470cc367732SIngo Molnar 	u64			nr_failed_migrations_affine;
471cc367732SIngo Molnar 	u64			nr_failed_migrations_running;
472cc367732SIngo Molnar 	u64			nr_failed_migrations_hot;
473cc367732SIngo Molnar 	u64			nr_forced_migrations;
474cc367732SIngo Molnar 
475cc367732SIngo Molnar 	u64			nr_wakeups;
476cc367732SIngo Molnar 	u64			nr_wakeups_sync;
477cc367732SIngo Molnar 	u64			nr_wakeups_migrate;
478cc367732SIngo Molnar 	u64			nr_wakeups_local;
479cc367732SIngo Molnar 	u64			nr_wakeups_remote;
480cc367732SIngo Molnar 	u64			nr_wakeups_affine;
481cc367732SIngo Molnar 	u64			nr_wakeups_affine_attempts;
482cc367732SIngo Molnar 	u64			nr_wakeups_passive;
483cc367732SIngo Molnar 	u64			nr_wakeups_idle;
48441acab88SLucas De Marchi };
48541acab88SLucas De Marchi #endif
48641acab88SLucas De Marchi 
48741acab88SLucas De Marchi struct sched_entity {
48841acab88SLucas De Marchi 	struct load_weight	load;		/* for load-balancing */
48941acab88SLucas De Marchi 	struct rb_node		run_node;
49041acab88SLucas De Marchi 	struct list_head	group_node;
49141acab88SLucas De Marchi 	unsigned int		on_rq;
49241acab88SLucas De Marchi 
49341acab88SLucas De Marchi 	u64			exec_start;
49441acab88SLucas De Marchi 	u64			sum_exec_runtime;
49541acab88SLucas De Marchi 	u64			vruntime;
49641acab88SLucas De Marchi 	u64			prev_sum_exec_runtime;
49741acab88SLucas De Marchi 
49841acab88SLucas De Marchi 	u64			nr_migrations;
49941acab88SLucas De Marchi 
50041acab88SLucas De Marchi #ifdef CONFIG_SCHEDSTATS
50141acab88SLucas De Marchi 	struct sched_statistics statistics;
50294c18227SIngo Molnar #endif
50394c18227SIngo Molnar 
50420b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
505fed14d45SPeter Zijlstra 	int			depth;
50620b8a59fSIngo Molnar 	struct sched_entity	*parent;
50720b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
50820b8a59fSIngo Molnar 	struct cfs_rq		*cfs_rq;
50920b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
51020b8a59fSIngo Molnar 	struct cfs_rq		*my_q;
51120b8a59fSIngo Molnar #endif
5128bd75c77SClark Williams 
513141965c7SAlex Shi #ifdef CONFIG_SMP
5145a107804SJiri Olsa 	/*
5155a107804SJiri Olsa 	 * Per entity load average tracking.
5165a107804SJiri Olsa 	 *
5175a107804SJiri Olsa 	 * Put into separate cache line so it does not
5185a107804SJiri Olsa 	 * collide with read-mostly values above.
5195a107804SJiri Olsa 	 */
5205a107804SJiri Olsa 	struct sched_avg	avg ____cacheline_aligned_in_smp;
5219d85f21cSPaul Turner #endif
52220b8a59fSIngo Molnar };
52370b97a7fSIngo Molnar 
524fa717060SPeter Zijlstra struct sched_rt_entity {
525fa717060SPeter Zijlstra 	struct list_head run_list;
52678f2c7dbSPeter Zijlstra 	unsigned long timeout;
52757d2aa00SYing Xue 	unsigned long watchdog_stamp;
528bee367edSRichard Kennedy 	unsigned int time_slice;
529ff77e468SPeter Zijlstra 	unsigned short on_rq;
530ff77e468SPeter Zijlstra 	unsigned short on_list;
5316f505b16SPeter Zijlstra 
53258d6c2d7SPeter Zijlstra 	struct sched_rt_entity *back;
533052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
5346f505b16SPeter Zijlstra 	struct sched_rt_entity	*parent;
5356f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
5366f505b16SPeter Zijlstra 	struct rt_rq		*rt_rq;
5376f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
5386f505b16SPeter Zijlstra 	struct rt_rq		*my_q;
5396f505b16SPeter Zijlstra #endif
540fa717060SPeter Zijlstra };
541fa717060SPeter Zijlstra 
542aab03e05SDario Faggioli struct sched_dl_entity {
543aab03e05SDario Faggioli 	struct rb_node	rb_node;
544aab03e05SDario Faggioli 
545aab03e05SDario Faggioli 	/*
546aab03e05SDario Faggioli 	 * Original scheduling parameters. Copied here from sched_attr
5474027d080Sxiaofeng.yan 	 * during sched_setattr(), they will remain the same until
5484027d080Sxiaofeng.yan 	 * the next sched_setattr().
549aab03e05SDario Faggioli 	 */
550aab03e05SDario Faggioli 	u64 dl_runtime;		/* maximum runtime for each instance	*/
551aab03e05SDario Faggioli 	u64 dl_deadline;	/* relative deadline of each instance	*/
552755378a4SHarald Gustafsson 	u64 dl_period;		/* separation of two instances (period) */
553332ac17eSDario Faggioli 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
554aab03e05SDario Faggioli 
555aab03e05SDario Faggioli 	/*
556aab03e05SDario Faggioli 	 * Actual scheduling parameters. Initialized with the values above,
557aab03e05SDario Faggioli 	 * they are continuously updated during task execution. Note that
558aab03e05SDario Faggioli 	 * the remaining runtime could be < 0 in case we are in overrun.
559aab03e05SDario Faggioli 	 */
560aab03e05SDario Faggioli 	s64 runtime;		/* remaining runtime for this instance	*/
561aab03e05SDario Faggioli 	u64 deadline;		/* absolute deadline for this instance	*/
562aab03e05SDario Faggioli 	unsigned int flags;	/* specifying the scheduler behaviour	*/
563aab03e05SDario Faggioli 
564aab03e05SDario Faggioli 	/*
565aab03e05SDario Faggioli 	 * Some bool flags:
566aab03e05SDario Faggioli 	 *
567aab03e05SDario Faggioli 	 * @dl_throttled tells if we exhausted the runtime. If so, the
568aab03e05SDario Faggioli 	 * task has to wait for a replenishment to be performed at the
569aab03e05SDario Faggioli 	 * next firing of dl_timer.
570aab03e05SDario Faggioli 	 *
5712d3d891dSDario Faggioli 	 * @dl_boosted tells if we are boosted due to DI. If so we are
5722d3d891dSDario Faggioli 	 * outside bandwidth enforcement mechanism (but only until we
5735bfd126eSJuri Lelli 	 * exit the critical section);
5745bfd126eSJuri Lelli 	 *
5755bfd126eSJuri Lelli 	 * @dl_yielded tells if task gave up the cpu before consuming
5765bfd126eSJuri Lelli 	 * all its available runtime during the last job.
577aab03e05SDario Faggioli 	 */
57872f9f3fdSLuca Abeni 	int dl_throttled, dl_boosted, dl_yielded;
579aab03e05SDario Faggioli 
580aab03e05SDario Faggioli 	/*
581aab03e05SDario Faggioli 	 * Bandwidth enforcement timer. Each -deadline task has its
582aab03e05SDario Faggioli 	 * own bandwidth to be enforced, thus we need one timer per task.
583aab03e05SDario Faggioli 	 */
584aab03e05SDario Faggioli 	struct hrtimer dl_timer;
585aab03e05SDario Faggioli };
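
/*
 * Illustrative numbers for the parameters above: a task with
 * dl_runtime = 10ms and dl_deadline = dl_period = 100ms may consume at
 * most 10% of a CPU (dl_bw == dl_runtime / dl_deadline == 0.1), with
 * each 10ms budget guaranteed to be available before the 100ms deadline.
 */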
5868bd75c77SClark Williams 
5871d082fd0SPaul E. McKenney union rcu_special {
5881d082fd0SPaul E. McKenney 	struct {
5898203d6d0SPaul E. McKenney 		u8 blocked;
5908203d6d0SPaul E. McKenney 		u8 need_qs;
5918203d6d0SPaul E. McKenney 		u8 exp_need_qs;
5928203d6d0SPaul E. McKenney 		u8 pad;	/* Otherwise the compiler can store garbage here. */
5938203d6d0SPaul E. McKenney 	} b; /* Bits. */
5948203d6d0SPaul E. McKenney 	u32 s; /* Set of bits. */
5951d082fd0SPaul E. McKenney };
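
/*
 * The union lets the RCU core read or clear all flags with one access to
 * ->s (e.g. t->rcu_read_unlock_special.s = 0) while individual bytes such
 * as ->b.blocked remain separately testable.
 */
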
59686848966SPaul E. McKenney struct rcu_node;
59786848966SPaul E. McKenney 
5988dc85d54SPeter Zijlstra enum perf_event_task_context {
5998dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
6008dc85d54SPeter Zijlstra 	perf_hw_context = 0,
60189a1e187SPeter Zijlstra 	perf_sw_context,
6028dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
6038dc85d54SPeter Zijlstra };
6048dc85d54SPeter Zijlstra 
605eb61baf6SIngo Molnar struct wake_q_node {
606eb61baf6SIngo Molnar 	struct wake_q_node *next;
607eb61baf6SIngo Molnar };
608eb61baf6SIngo Molnar 
60972b252aeSMel Gorman /* Track pages that require TLB flushes */
61072b252aeSMel Gorman struct tlbflush_unmap_batch {
61172b252aeSMel Gorman 	/*
61272b252aeSMel Gorman 	 * Each bit set is a CPU that potentially has a TLB entry for one of
61372b252aeSMel Gorman 	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
61472b252aeSMel Gorman 	 */
61572b252aeSMel Gorman 	struct cpumask cpumask;
61672b252aeSMel Gorman 
61772b252aeSMel Gorman 	/* True if any bit in cpumask is set */
61872b252aeSMel Gorman 	bool flush_required;
619d950c947SMel Gorman 
620d950c947SMel Gorman 	/*
621d950c947SMel Gorman 	 * If true then the PTE was dirty when unmapped. The entry must be
622d950c947SMel Gorman 	 * flushed before IO is initiated or a stale TLB entry potentially
623d950c947SMel Gorman 	 * allows an update without redirtying the page.
624d950c947SMel Gorman 	 */
625d950c947SMel Gorman 	bool writable;
62672b252aeSMel Gorman };
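
/*
 * Minimal sketch of the producer side, as set_tlb_ubc_flush_pending()
 * (referenced above) does it, with mm being the address space pages are
 * unmapped from:
 *
 *	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
 *	tlb_ubc->flush_required = true;
 */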
62772b252aeSMel Gorman 
6281da177e4SLinus Torvalds struct task_struct {
629c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
630c65eacbeSAndy Lutomirski 	/*
631c65eacbeSAndy Lutomirski 	 * For reasons of header soup (see current_thread_info()), this
632c65eacbeSAndy Lutomirski 	 * must be the first element of task_struct.
633c65eacbeSAndy Lutomirski 	 */
634c65eacbeSAndy Lutomirski 	struct thread_info thread_info;
635c65eacbeSAndy Lutomirski #endif
6361da177e4SLinus Torvalds 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
637f7e4217bSRoman Zippel 	void *stack;
6381da177e4SLinus Torvalds 	atomic_t usage;
63997dc32cdSWilliam Cohen 	unsigned int flags;	/* per process flags, defined below */
64097dc32cdSWilliam Cohen 	unsigned int ptrace;
6411da177e4SLinus Torvalds 
6422dd73a4fSPeter Williams #ifdef CONFIG_SMP
643fa14ff4aSPeter Zijlstra 	struct llist_node wake_entry;
6443ca7a440SPeter Zijlstra 	int on_cpu;
645c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
646c65eacbeSAndy Lutomirski 	unsigned int cpu;	/* current CPU */
647c65eacbeSAndy Lutomirski #endif
64863b0e9edSMike Galbraith 	unsigned int wakee_flips;
64962470419SMichael Wang 	unsigned long wakee_flip_decay_ts;
65063b0e9edSMike Galbraith 	struct task_struct *last_wakee;
651ac66f547SPeter Zijlstra 
652ac66f547SPeter Zijlstra 	int wake_cpu;
6534866cde0SNick Piggin #endif
654fd2f4419SPeter Zijlstra 	int on_rq;
65550e645a8SIngo Molnar 
656b29739f9SIngo Molnar 	int prio, static_prio, normal_prio;
657c7aceabaSRichard Kennedy 	unsigned int rt_priority;
6585522d5d5SIngo Molnar 	const struct sched_class *sched_class;
65920b8a59fSIngo Molnar 	struct sched_entity se;
660fa717060SPeter Zijlstra 	struct sched_rt_entity rt;
6618323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
6628323f26cSPeter Zijlstra 	struct task_group *sched_task_group;
6638323f26cSPeter Zijlstra #endif
664aab03e05SDario Faggioli 	struct sched_dl_entity dl;
6651da177e4SLinus Torvalds 
666e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
667e107be36SAvi Kivity 	/* list of struct preempt_notifier: */
668e107be36SAvi Kivity 	struct hlist_head preempt_notifiers;
669e107be36SAvi Kivity #endif
670e107be36SAvi Kivity 
6716c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
6722056a782SJens Axboe 	unsigned int btrace_seq;
6736c5c9341SAlexey Dobriyan #endif
6741da177e4SLinus Torvalds 
67597dc32cdSWilliam Cohen 	unsigned int policy;
67629baa747SPeter Zijlstra 	int nr_cpus_allowed;
6771da177e4SLinus Torvalds 	cpumask_t cpus_allowed;
6781da177e4SLinus Torvalds 
679a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
680e260be67SPaul E. McKenney 	int rcu_read_lock_nesting;
6811d082fd0SPaul E. McKenney 	union rcu_special rcu_read_unlock_special;
682f41d911fSPaul E. McKenney 	struct list_head rcu_node_entry;
683a57eb940SPaul E. McKenney 	struct rcu_node *rcu_blocked_node;
68428f6569aSPranith Kumar #endif /* #ifdef CONFIG_PREEMPT_RCU */
6858315f422SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
6868315f422SPaul E. McKenney 	unsigned long rcu_tasks_nvcsw;
6878315f422SPaul E. McKenney 	bool rcu_tasks_holdout;
6888315f422SPaul E. McKenney 	struct list_head rcu_tasks_holdout_list;
689176f8f7aSPaul E. McKenney 	int rcu_tasks_idle_cpu;
6908315f422SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RCU */
691e260be67SPaul E. McKenney 
692f6db8347SNaveen N. Rao #ifdef CONFIG_SCHED_INFO
6931da177e4SLinus Torvalds 	struct sched_info sched_info;
6941da177e4SLinus Torvalds #endif
6951da177e4SLinus Torvalds 
6961da177e4SLinus Torvalds 	struct list_head tasks;
697806c09a7SDario Faggioli #ifdef CONFIG_SMP
698917b627dSGregory Haskins 	struct plist_node pushable_tasks;
6991baca4ceSJuri Lelli 	struct rb_node pushable_dl_tasks;
700806c09a7SDario Faggioli #endif
7011da177e4SLinus Torvalds 
7021da177e4SLinus Torvalds 	struct mm_struct *mm, *active_mm;
703314ff785SIngo Molnar 
704314ff785SIngo Molnar 	/* Per-thread vma caching: */
705314ff785SIngo Molnar 	struct vmacache vmacache;
706314ff785SIngo Molnar 
70734e55232SKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
70834e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat	rss_stat;
70934e55232SKAMEZAWA Hiroyuki #endif
7101da177e4SLinus Torvalds /* task state */
71197dc32cdSWilliam Cohen 	int exit_state;
7121da177e4SLinus Torvalds 	int exit_code, exit_signal;
7131da177e4SLinus Torvalds 	int pdeath_signal;  /*  The signal sent when the parent dies  */
714e7cc4173SPalmer Dabbelt 	unsigned long jobctl;	/* JOBCTL_*, siglock protected */
7159b89f6baSAndrei Epure 
7169b89f6baSAndrei Epure 	/* Used for emulating ABI behavior of previous Linux versions */
71797dc32cdSWilliam Cohen 	unsigned int personality;
7189b89f6baSAndrei Epure 
719be958bdcSPeter Zijlstra 	/* scheduler bits, serialized by scheduler locks */
720ca94c442SLennart Poettering 	unsigned sched_reset_on_fork:1;
721a8e4f2eaSPeter Zijlstra 	unsigned sched_contributes_to_load:1;
722ff303e66SPeter Zijlstra 	unsigned sched_migrated:1;
723b7e7ade3SPeter Zijlstra 	unsigned sched_remote_wakeup:1;
724be958bdcSPeter Zijlstra 	unsigned :0; /* force alignment to the next boundary */
725be958bdcSPeter Zijlstra 
726be958bdcSPeter Zijlstra 	/* unserialized, strictly 'current' */
727be958bdcSPeter Zijlstra 	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
728be958bdcSPeter Zijlstra 	unsigned in_iowait:1;
7297e781418SAndy Lutomirski #if !defined(TIF_RESTORE_SIGMASK)
7307e781418SAndy Lutomirski 	unsigned restore_sigmask:1;
7317e781418SAndy Lutomirski #endif
732626ebc41STejun Heo #ifdef CONFIG_MEMCG
733626ebc41STejun Heo 	unsigned memcg_may_oom:1;
734127424c8SJohannes Weiner #ifndef CONFIG_SLOB
7356f185c29SVladimir Davydov 	unsigned memcg_kmem_skip_account:1;
7366f185c29SVladimir Davydov #endif
737127424c8SJohannes Weiner #endif
738ff303e66SPeter Zijlstra #ifdef CONFIG_COMPAT_BRK
739ff303e66SPeter Zijlstra 	unsigned brk_randomized:1;
740ff303e66SPeter Zijlstra #endif
7416f185c29SVladimir Davydov 
7421d4457f9SKees Cook 	unsigned long atomic_flags; /* Flags needing atomic access. */
7431d4457f9SKees Cook 
744f56141e3SAndy Lutomirski 	struct restart_block restart_block;
745f56141e3SAndy Lutomirski 
7461da177e4SLinus Torvalds 	pid_t pid;
7471da177e4SLinus Torvalds 	pid_t tgid;
7480a425405SArjan van de Ven 
7491314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
7500a425405SArjan van de Ven 	/* Canary value for the -fstack-protector gcc feature */
7510a425405SArjan van de Ven 	unsigned long stack_canary;
7521314562aSHiroshi Shimamoto #endif
7531da177e4SLinus Torvalds 	/*
7541da177e4SLinus Torvalds 	 * pointers to (original) parent process, youngest child, younger sibling,
7551da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
756f470021aSRoland McGrath 	 * p->real_parent->pid)
7571da177e4SLinus Torvalds 	 */
758abd63bc3SKees Cook 	struct task_struct __rcu *real_parent; /* real parent process */
759abd63bc3SKees Cook 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
7601da177e4SLinus Torvalds 	/*
761f470021aSRoland McGrath 	 * children/sibling forms the list of my natural children
7621da177e4SLinus Torvalds 	 */
7631da177e4SLinus Torvalds 	struct list_head children;	/* list of my children */
7641da177e4SLinus Torvalds 	struct list_head sibling;	/* linkage in my parent's children list */
7651da177e4SLinus Torvalds 	struct task_struct *group_leader;	/* threadgroup leader */
7661da177e4SLinus Torvalds 
767f470021aSRoland McGrath 	/*
768f470021aSRoland McGrath 	 * ptraced is the list of tasks this task is using ptrace on.
769f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
770f470021aSRoland McGrath 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
771f470021aSRoland McGrath 	 */
772f470021aSRoland McGrath 	struct list_head ptraced;
773f470021aSRoland McGrath 	struct list_head ptrace_entry;
774f470021aSRoland McGrath 
7751da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
77692476d7fSEric W. Biederman 	struct pid_link pids[PIDTYPE_MAX];
77747e65328SOleg Nesterov 	struct list_head thread_group;
7780c740d0aSOleg Nesterov 	struct list_head thread_node;
7791da177e4SLinus Torvalds 
7801da177e4SLinus Torvalds 	struct completion *vfork_done;		/* for vfork() */
7811da177e4SLinus Torvalds 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
7821da177e4SLinus Torvalds 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
7831da177e4SLinus Torvalds 
7845613fda9SFrederic Weisbecker 	u64 utime, stime;
78540565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
7865613fda9SFrederic Weisbecker 	u64 utimescaled, stimescaled;
78740565b5aSStanislaw Gruszka #endif
78816a6d9beSFrederic Weisbecker 	u64 gtime;
7899d7fb042SPeter Zijlstra 	struct prev_cputime prev_cputime;
7906a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
791b7ce2277SFrederic Weisbecker 	seqcount_t vtime_seqcount;
7926a61671bSFrederic Weisbecker 	unsigned long long vtime_snap;
7936a61671bSFrederic Weisbecker 	enum {
7947098c1eaSFrederic Weisbecker 		/* Task is sleeping or running on a CPU with VTIME inactive */
7957098c1eaSFrederic Weisbecker 		VTIME_INACTIVE = 0,
7967098c1eaSFrederic Weisbecker 		/* Task runs in userspace on a CPU with VTIME active */
7976a61671bSFrederic Weisbecker 		VTIME_USER,
7987098c1eaSFrederic Weisbecker 		/* Task runs in kernelspace on a CPU with VTIME active */
7996a61671bSFrederic Weisbecker 		VTIME_SYS,
8006a61671bSFrederic Weisbecker 	} vtime_snap_whence;
8016a61671bSFrederic Weisbecker #endif
802d027d45dSFrederic Weisbecker 
803d027d45dSFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
804f009a7a7SFrederic Weisbecker 	atomic_t tick_dep_mask;
805d027d45dSFrederic Weisbecker #endif
8061da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw; /* context switch counts */
807ccbf62d8SThomas Gleixner 	u64 start_time;		/* monotonic time in nsec */
80857e0be04SThomas Gleixner 	u64 real_start_time;	/* boot based time in nsec */
8091da177e4SLinus Torvalds /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
8101da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt;
8111da177e4SLinus Torvalds 
812b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS
813f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
8141da177e4SLinus Torvalds 	struct list_head cpu_timers[3];
815b18b6a9cSNicolas Pitre #endif
8161da177e4SLinus Torvalds 
8171da177e4SLinus Torvalds /* process credentials */
81864b875f7SEric W. Biederman 	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
8191b0ba1c9SArnd Bergmann 	const struct cred __rcu *real_cred; /* objective and real subjective task
8203b11a1deSDavid Howells 					 * credentials (COW) */
8211b0ba1c9SArnd Bergmann 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
8223b11a1deSDavid Howells 					 * credentials (COW) */
82336772092SPaolo 'Blaisorblade' Giarrusso 	char comm[TASK_COMM_LEN]; /* executable name excluding path
82436772092SPaolo 'Blaisorblade' Giarrusso 				     - access with [gs]et_task_comm (which lock
82536772092SPaolo 'Blaisorblade' Giarrusso 				       it with task_lock())
826221af7f8SLinus Torvalds 				     - initialized normally by setup_new_exec */
8271da177e4SLinus Torvalds /* file system info */
828756daf26SNeilBrown 	struct nameidata *nameidata;
8293d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
8301da177e4SLinus Torvalds /* ipc stuff */
8311da177e4SLinus Torvalds 	struct sysv_sem sysvsem;
832ab602f79SJack Miller 	struct sysv_shm sysvshm;
8333d5b6fccSAlexey Dobriyan #endif
834e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
83582a1fcb9SIngo Molnar /* hung task detection */
83682a1fcb9SIngo Molnar 	unsigned long last_switch_count;
83782a1fcb9SIngo Molnar #endif
8381da177e4SLinus Torvalds /* filesystem information */
8391da177e4SLinus Torvalds 	struct fs_struct *fs;
8401da177e4SLinus Torvalds /* open file information */
8411da177e4SLinus Torvalds 	struct files_struct *files;
8421651e14eSSerge E. Hallyn /* namespaces */
843ab516013SSerge E. Hallyn 	struct nsproxy *nsproxy;
8441da177e4SLinus Torvalds /* signal handlers */
8451da177e4SLinus Torvalds 	struct signal_struct *signal;
8461da177e4SLinus Torvalds 	struct sighand_struct *sighand;
8471da177e4SLinus Torvalds 
8481da177e4SLinus Torvalds 	sigset_t blocked, real_blocked;
849f3de272bSRoland McGrath 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
8501da177e4SLinus Torvalds 	struct sigpending pending;
8511da177e4SLinus Torvalds 
8521da177e4SLinus Torvalds 	unsigned long sas_ss_sp;
8531da177e4SLinus Torvalds 	size_t sas_ss_size;
8542a742138SStas Sergeev 	unsigned sas_ss_flags;
8552e01fabeSOleg Nesterov 
85667d12145SAl Viro 	struct callback_head *task_works;
857e73f8959SOleg Nesterov 
8581da177e4SLinus Torvalds 	struct audit_context *audit_context;
859bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
860e1760bd5SEric W. Biederman 	kuid_t loginuid;
8614746ec5bSEric Paris 	unsigned int sessionid;
862bfef93a5SAl Viro #endif
863932ecebbSWill Drewry 	struct seccomp seccomp;
8641da177e4SLinus Torvalds 
8651da177e4SLinus Torvalds /* Thread group tracking */
8661da177e4SLinus Torvalds 	u32 parent_exec_id;
8671da177e4SLinus Torvalds 	u32 self_exec_id;
86858568d2aSMiao Xie /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
86958568d2aSMiao Xie  * mempolicy */
8701da177e4SLinus Torvalds 	spinlock_t alloc_lock;
8711da177e4SLinus Torvalds 
872b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
8731d615482SThomas Gleixner 	raw_spinlock_t pi_lock;
874b29739f9SIngo Molnar 
87576751049SPeter Zijlstra 	struct wake_q_node wake_q;
87676751049SPeter Zijlstra 
87723f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
87823f78d4aSIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task */
879fb00aca4SPeter Zijlstra 	struct rb_root pi_waiters;
880fb00aca4SPeter Zijlstra 	struct rb_node *pi_waiters_leftmost;
88123f78d4aSIngo Molnar 	/* Deadlock detection and priority inheritance handling */
88223f78d4aSIngo Molnar 	struct rt_mutex_waiter *pi_blocked_on;
88323f78d4aSIngo Molnar #endif
88423f78d4aSIngo Molnar 
885408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
886408894eeSIngo Molnar 	/* mutex deadlock detection */
887408894eeSIngo Molnar 	struct mutex_waiter *blocked_on;
888408894eeSIngo Molnar #endif
889de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
890de30a2b3SIngo Molnar 	unsigned int irq_events;
891de30a2b3SIngo Molnar 	unsigned long hardirq_enable_ip;
892de30a2b3SIngo Molnar 	unsigned long hardirq_disable_ip;
893fa1452e8SHiroshi Shimamoto 	unsigned int hardirq_enable_event;
894de30a2b3SIngo Molnar 	unsigned int hardirq_disable_event;
895fa1452e8SHiroshi Shimamoto 	int hardirqs_enabled;
896de30a2b3SIngo Molnar 	int hardirq_context;
897fa1452e8SHiroshi Shimamoto 	unsigned long softirq_disable_ip;
898fa1452e8SHiroshi Shimamoto 	unsigned long softirq_enable_ip;
899fa1452e8SHiroshi Shimamoto 	unsigned int softirq_disable_event;
900fa1452e8SHiroshi Shimamoto 	unsigned int softirq_enable_event;
901fa1452e8SHiroshi Shimamoto 	int softirqs_enabled;
902de30a2b3SIngo Molnar 	int softirq_context;
903de30a2b3SIngo Molnar #endif
904fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
905bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
906fbb9ce95SIngo Molnar 	u64 curr_chain_key;
907fbb9ce95SIngo Molnar 	int lockdep_depth;
908fbb9ce95SIngo Molnar 	unsigned int lockdep_recursion;
909c7aceabaSRichard Kennedy 	struct held_lock held_locks[MAX_LOCK_DEPTH];
910cf40bd16SNick Piggin 	gfp_t lockdep_reclaim_gfp;
911fbb9ce95SIngo Molnar #endif
912c6d30853SAndrey Ryabinin #ifdef CONFIG_UBSAN
913c6d30853SAndrey Ryabinin 	unsigned int in_ubsan;
914c6d30853SAndrey Ryabinin #endif
915408894eeSIngo Molnar 
9161da177e4SLinus Torvalds /* journalling filesystem info */
9171da177e4SLinus Torvalds 	void *journal_info;
9181da177e4SLinus Torvalds 
919d89d8796SNeil Brown /* stacked block device info */
920bddd87c7SAkinobu Mita 	struct bio_list *bio_list;
921d89d8796SNeil Brown 
92273c10101SJens Axboe #ifdef CONFIG_BLOCK
92373c10101SJens Axboe /* stack plugging */
92473c10101SJens Axboe 	struct blk_plug *plug;
92573c10101SJens Axboe #endif
92673c10101SJens Axboe 
9271da177e4SLinus Torvalds /* VM state */
9281da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state;
9291da177e4SLinus Torvalds 
9301da177e4SLinus Torvalds 	struct backing_dev_info *backing_dev_info;
9311da177e4SLinus Torvalds 
9321da177e4SLinus Torvalds 	struct io_context *io_context;
9331da177e4SLinus Torvalds 
9341da177e4SLinus Torvalds 	unsigned long ptrace_message;
9351da177e4SLinus Torvalds 	siginfo_t *last_siginfo; /* For ptrace use.  */
9367c3ab738SAndrew Morton 	struct task_io_accounting ioac;
9378f0ab514SJay Lan #if defined(CONFIG_TASK_XACCT)
9381da177e4SLinus Torvalds 	u64 acct_rss_mem1;	/* accumulated rss usage */
9391da177e4SLinus Torvalds 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
940605dc2b3SFrederic Weisbecker 	u64 acct_timexpd;	/* stime + utime since last update */
9411da177e4SLinus Torvalds #endif
9421da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
94358568d2aSMiao Xie 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
944cc9a6c87SMel Gorman 	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
945825a46afSPaul Jackson 	int cpuset_mem_spread_rotor;
9466adef3ebSJack Steiner 	int cpuset_slab_spread_rotor;
9471da177e4SLinus Torvalds #endif
948ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
949817929ecSPaul Menage 	/* Control Group info protected by css_set_lock */
9502c392b8cSArnd Bergmann 	struct css_set __rcu *cgroups;
951817929ecSPaul Menage 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
952817929ecSPaul Menage 	struct list_head cg_list;
953ddbcc7e8SPaul Menage #endif
954e02737d5SFenghua Yu #ifdef CONFIG_INTEL_RDT_A
955e02737d5SFenghua Yu 	int closid;
956e02737d5SFenghua Yu #endif
95742b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
9580771dfefSIngo Molnar 	struct robust_list_head __user *robust_list;
95934f192c6SIngo Molnar #ifdef CONFIG_COMPAT
96034f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
96134f192c6SIngo Molnar #endif
962c87e2837SIngo Molnar 	struct list_head pi_state_list;
963c87e2837SIngo Molnar 	struct futex_pi_state *pi_state_cache;
96442b2dd0aSAlexey Dobriyan #endif
965cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
9668dc85d54SPeter Zijlstra 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
967cdd6c482SIngo Molnar 	struct mutex perf_event_mutex;
968cdd6c482SIngo Molnar 	struct list_head perf_event_list;
969a63eaf34SPaul Mackerras #endif
9708f47b187SThomas Gleixner #ifdef CONFIG_DEBUG_PREEMPT
9718f47b187SThomas Gleixner 	unsigned long preempt_disable_ip;
9728f47b187SThomas Gleixner #endif
973c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
97458568d2aSMiao Xie 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
975c7aceabaSRichard Kennedy 	short il_next;
976207205a2SEric Dumazet 	short pref_node_fork;
977c7aceabaSRichard Kennedy #endif
978cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
979cbee9f88SPeter Zijlstra 	int numa_scan_seq;
980cbee9f88SPeter Zijlstra 	unsigned int numa_scan_period;
981598f0ec0SMel Gorman 	unsigned int numa_scan_period_max;
982de1c9ce6SRik van Riel 	int numa_preferred_nid;
9836b9a7460SMel Gorman 	unsigned long numa_migrate_retry;
984cbee9f88SPeter Zijlstra 	u64 node_stamp;			/* migration stamp  */
9857e2703e6SRik van Riel 	u64 last_task_numa_placement;
9867e2703e6SRik van Riel 	u64 last_sum_exec_runtime;
987cbee9f88SPeter Zijlstra 	struct callback_head numa_work;
988f809ca9aSMel Gorman 
9898c8a743cSPeter Zijlstra 	struct list_head numa_entry;
9908c8a743cSPeter Zijlstra 	struct numa_group *numa_group;
9918c8a743cSPeter Zijlstra 
992745d6147SMel Gorman 	/*
99344dba3d5SIulia Manda 	 * numa_faults is an array split into four regions:
99444dba3d5SIulia Manda 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
99544dba3d5SIulia Manda 	 * in this precise order.
99644dba3d5SIulia Manda 	 *
99744dba3d5SIulia Manda 	 * faults_memory: Exponential decaying average of faults on a per-node
99844dba3d5SIulia Manda 	 * basis. Scheduling placement decisions are made based on these
99944dba3d5SIulia Manda 	 * counts. The values remain static for the duration of a PTE scan.
100044dba3d5SIulia Manda 	 * faults_cpu: Track the nodes the process was running on when a NUMA
100144dba3d5SIulia Manda 	 * hinting fault was incurred.
100244dba3d5SIulia Manda 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
100344dba3d5SIulia Manda 	 * during the current scan window. When the scan completes, the counts
100444dba3d5SIulia Manda 	 * in faults_memory and faults_cpu decay and these values are copied.
1005745d6147SMel Gorman 	 */
100644dba3d5SIulia Manda 	unsigned long *numa_faults;
100783e1d2cdSMel Gorman 	unsigned long total_numa_faults;
1008745d6147SMel Gorman 
1009745d6147SMel Gorman 	/*
101004bb2f94SRik van Riel 	 * numa_faults_locality tracks if faults recorded during the last
1011074c2381SMel Gorman 	 * scan window were remote/local or failed to migrate. The task scan
1012074c2381SMel Gorman 	 * period is adapted based on the locality of the faults with different
1013074c2381SMel Gorman 	 * weights depending on whether they were shared or private faults
101404bb2f94SRik van Riel 	 */
1015074c2381SMel Gorman 	unsigned long numa_faults_locality[3];
101604bb2f94SRik van Riel 
1017b32e86b4SIngo Molnar 	unsigned long numa_pages_migrated;
1018cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
1019cbee9f88SPeter Zijlstra 
102072b252aeSMel Gorman #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
102172b252aeSMel Gorman 	struct tlbflush_unmap_batch tlb_ubc;
102272b252aeSMel Gorman #endif
102372b252aeSMel Gorman 
1024e56d0903SIngo Molnar 	struct rcu_head rcu;
1025b92ce558SJens Axboe 
1026b92ce558SJens Axboe 	/*
1027b92ce558SJens Axboe 	 * cache last used pipe for splice
1028b92ce558SJens Axboe 	 */
1029b92ce558SJens Axboe 	struct pipe_inode_info *splice_pipe;
10305640f768SEric Dumazet 
10315640f768SEric Dumazet 	struct page_frag task_frag;
10325640f768SEric Dumazet 
1033ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT
1034ca74e92bSShailabh Nagar 	struct task_delay_info		*delays;
1035ca74e92bSShailabh Nagar #endif
103647913d4eSIngo Molnar 
1037f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1038f4f154fdSAkinobu Mita 	int make_it_fail;
1039f4f154fdSAkinobu Mita #endif
10409d823e8fSWu Fengguang 	/*
10419d823e8fSWu Fengguang 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
10429d823e8fSWu Fengguang 	 * balance_dirty_pages() for some dirty throttling pause
10439d823e8fSWu Fengguang 	 */
10449d823e8fSWu Fengguang 	int nr_dirtied;
10459d823e8fSWu Fengguang 	int nr_dirtied_pause;
104683712358SWu Fengguang 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
10479d823e8fSWu Fengguang 
10489745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
10499745512cSArjan van de Ven 	int latency_record_count;
10509745512cSArjan van de Ven 	struct latency_record latency_record[LT_SAVECOUNT];
10519745512cSArjan van de Ven #endif
10526976675dSArjan van de Ven 	/*
10536976675dSArjan van de Ven 	 * time slack values; these are used to round up poll() and
10546976675dSArjan van de Ven 	 * select() etc. timeout values. These are in nanoseconds.
10556976675dSArjan van de Ven 	 */
1056da8b44d5SJohn Stultz 	u64 timer_slack_ns;
1057da8b44d5SJohn Stultz 	u64 default_timer_slack_ns;
1058f8d570a4SDavid Miller 
10590b24beccSAndrey Ryabinin #ifdef CONFIG_KASAN
10600b24beccSAndrey Ryabinin 	unsigned int kasan_depth;
10610b24beccSAndrey Ryabinin #endif
1062fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10633ad2f3fbSDaniel Mack 	/* Index of current stored address in ret_stack */
1064f201ae23SFrederic Weisbecker 	int curr_ret_stack;
1065f201ae23SFrederic Weisbecker 	/* Stack of return addresses for return function tracing */
1066f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack	*ret_stack;
10678aef2d28SSteven Rostedt 	/* Timestamp of the last schedule */
10688aef2d28SSteven Rostedt 	unsigned long long ftrace_timestamp;
1069f201ae23SFrederic Weisbecker 	/*
1070f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
1071f201ae23SFrederic Weisbecker 	 * because of depth overrun.
1072f201ae23SFrederic Weisbecker 	 */
1073f201ae23SFrederic Weisbecker 	atomic_t trace_overrun;
1074380c4b14SFrederic Weisbecker 	/* Pause flag for function graph tracing */
1075380c4b14SFrederic Weisbecker 	atomic_t tracing_graph_pause;
1076f201ae23SFrederic Weisbecker #endif
1077ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1078ea4e2bc4SSteven Rostedt 	/* state flags for use by tracers */
1079ea4e2bc4SSteven Rostedt 	unsigned long trace;
1080b1cff0adSSteven Rostedt 	/* bitmask and counter of trace recursion */
1081261842b7SSteven Rostedt 	unsigned long trace_recursion;
1082261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
10835c9a8750SDmitry Vyukov #ifdef CONFIG_KCOV
10845c9a8750SDmitry Vyukov 	/* Coverage collection mode enabled for this task (0 if disabled). */
10855c9a8750SDmitry Vyukov 	enum kcov_mode kcov_mode;
10865c9a8750SDmitry Vyukov 	/* Size of the kcov_area. */
10875c9a8750SDmitry Vyukov 	unsigned	kcov_size;
10885c9a8750SDmitry Vyukov 	/* Buffer for coverage collection. */
10895c9a8750SDmitry Vyukov 	void		*kcov_area;
10905c9a8750SDmitry Vyukov 	/* kcov descriptor wired with this task, or NULL. */
10915c9a8750SDmitry Vyukov 	struct kcov	*kcov;
10925c9a8750SDmitry Vyukov #endif
10936f185c29SVladimir Davydov #ifdef CONFIG_MEMCG
1094626ebc41STejun Heo 	struct mem_cgroup *memcg_in_oom;
1095626ebc41STejun Heo 	gfp_t memcg_oom_gfp_mask;
1096626ebc41STejun Heo 	int memcg_oom_order;
1097b23afb93STejun Heo 
1098b23afb93STejun Heo 	/* number of pages to reclaim on returning to userland */
1099b23afb93STejun Heo 	unsigned int memcg_nr_pages_over_high;
1100569b846dSKAMEZAWA Hiroyuki #endif
11010326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
11020326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
11030326f5a9SSrikar Dronamraju #endif
1104cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1105cafe5635SKent Overstreet 	unsigned int	sequential_io;
1106cafe5635SKent Overstreet 	unsigned int	sequential_io_avg;
1107cafe5635SKent Overstreet #endif
11088eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
11098eb23b9fSPeter Zijlstra 	unsigned long	task_state_change;
11108eb23b9fSPeter Zijlstra #endif
11118bcbde54SDavid Hildenbrand 	int pagefault_disabled;
111203049269SMichal Hocko #ifdef CONFIG_MMU
111329c696e1SVladimir Davydov 	struct task_struct *oom_reaper_list;
111403049269SMichal Hocko #endif
1115ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK
1116ba14a194SAndy Lutomirski 	struct vm_struct *stack_vm_area;
1117ba14a194SAndy Lutomirski #endif
111868f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
111968f24b08SAndy Lutomirski 	/* A live task holds one reference. */
112068f24b08SAndy Lutomirski 	atomic_t stack_refcount;
112168f24b08SAndy Lutomirski #endif
11220c8c0f03SDave Hansen /* CPU-specific state of this task */
11230c8c0f03SDave Hansen 	struct thread_struct thread;
11240c8c0f03SDave Hansen /*
11250c8c0f03SDave Hansen  * WARNING: on x86, 'thread_struct' contains a variable-sized
11260c8c0f03SDave Hansen  * structure.  It *MUST* be at the end of 'task_struct'.
11270c8c0f03SDave Hansen  *
11280c8c0f03SDave Hansen  * Do not put anything below here!
11290c8c0f03SDave Hansen  */
11301da177e4SLinus Torvalds };
11311da177e4SLinus Torvalds 
11325aaeb5c0SIngo Molnar #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
11335aaeb5c0SIngo Molnar extern int arch_task_struct_size __read_mostly;
11345aaeb5c0SIngo Molnar #else
11355aaeb5c0SIngo Molnar # define arch_task_struct_size (sizeof(struct task_struct))
11365aaeb5c0SIngo Molnar #endif
11370c8c0f03SDave Hansen 
1138ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK
1139ba14a194SAndy Lutomirski static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1140ba14a194SAndy Lutomirski {
1141ba14a194SAndy Lutomirski 	return t->stack_vm_area;
1142ba14a194SAndy Lutomirski }
1143ba14a194SAndy Lutomirski #else
1144ba14a194SAndy Lutomirski static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1145ba14a194SAndy Lutomirski {
1146ba14a194SAndy Lutomirski 	return NULL;
1147ba14a194SAndy Lutomirski }
1148ba14a194SAndy Lutomirski #endif
1149ba14a194SAndy Lutomirski 
1150e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
115122c935f4SEric W. Biederman {
115222c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
115322c935f4SEric W. Biederman }
115422c935f4SEric W. Biederman 
1155e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
115622c935f4SEric W. Biederman {
115722c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
115822c935f4SEric W. Biederman }
115922c935f4SEric W. Biederman 
11606dda81f4SOleg Nesterov /*
11616dda81f4SOleg Nesterov  * Without the tasklist or RCU lock it is not safe to dereference
11626dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current;
11636dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
11646dda81f4SOleg Nesterov  */
1165e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
116622c935f4SEric W. Biederman {
116722c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
116822c935f4SEric W. Biederman }
116922c935f4SEric W. Biederman 
1170e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
117122c935f4SEric W. Biederman {
117222c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
117322c935f4SEric W. Biederman }
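
/*
 * Editor's sketch: per the comment above, hold rcu_read_lock() (or
 * tasklist_lock) across task_pgrp()/task_session(), and convert the
 * struct pid to a stable numeric id before dropping the lock:
 *
 *	pid_t pgrp;
 *
 *	rcu_read_lock();
 *	pgrp = pid_vnr(task_pgrp(tsk));
 *	rcu_read_unlock();
 *
 * pid_vnr() is declared in include/linux/pid.h.
 */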
117422c935f4SEric W. Biederman 
11757af57294SPavel Emelyanov struct pid_namespace;
11767af57294SPavel Emelyanov 
11777af57294SPavel Emelyanov /*
11787af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
11797af57294SPavel Emelyanov  * from various namespaces
11807af57294SPavel Emelyanov  *
11817af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
118244c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
118344c4e1b2SEric W. Biederman  *                     current.
11847af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
11857af57294SPavel Emelyanov  *
11867af57294SPavel Emelyanov  * set_task_vxid()   : assigns a virtual id to a task;
11877af57294SPavel Emelyanov  *
11887af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
11897af57294SPavel Emelyanov  */
119052ee2dfdSOleg Nesterov pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
119152ee2dfdSOleg Nesterov 			struct pid_namespace *ns);
11927af57294SPavel Emelyanov 
1193e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
11947af57294SPavel Emelyanov {
11957af57294SPavel Emelyanov 	return tsk->pid;
11967af57294SPavel Emelyanov }
11977af57294SPavel Emelyanov 
119852ee2dfdSOleg Nesterov static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
119952ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
120052ee2dfdSOleg Nesterov {
120152ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
120252ee2dfdSOleg Nesterov }
12037af57294SPavel Emelyanov 
12047af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
12057af57294SPavel Emelyanov {
120652ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
12077af57294SPavel Emelyanov }
12087af57294SPavel Emelyanov 
12097af57294SPavel Emelyanov 
1210e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
12117af57294SPavel Emelyanov {
12127af57294SPavel Emelyanov 	return tsk->tgid;
12137af57294SPavel Emelyanov }
12147af57294SPavel Emelyanov 
12152f2a3a46SPavel Emelyanov pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
12167af57294SPavel Emelyanov 
12177af57294SPavel Emelyanov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
12187af57294SPavel Emelyanov {
12197af57294SPavel Emelyanov 	return pid_vnr(task_tgid(tsk));
12207af57294SPavel Emelyanov }
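
/*
 * Editor's sketch: the canonical consumer of task_tgid_vnr() is
 * sys_getpid(), which reports the thread-group id as seen from the
 * caller's own pid namespace:
 *
 *	SYSCALL_DEFINE0(getpid)
 *	{
 *		return task_tgid_vnr(current);
 *	}
 */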
12217af57294SPavel Emelyanov 
12227af57294SPavel Emelyanov 
122380e0b6e8SRichard Guy Briggs static inline int pid_alive(const struct task_struct *p);
1224ad36d282SRichard Guy Briggs static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1225ad36d282SRichard Guy Briggs {
1226ad36d282SRichard Guy Briggs 	pid_t pid = 0;
1227ad36d282SRichard Guy Briggs 
1228ad36d282SRichard Guy Briggs 	rcu_read_lock();
1229ad36d282SRichard Guy Briggs 	if (pid_alive(tsk))
1230ad36d282SRichard Guy Briggs 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1231ad36d282SRichard Guy Briggs 	rcu_read_unlock();
1232ad36d282SRichard Guy Briggs 
1233ad36d282SRichard Guy Briggs 	return pid;
1234ad36d282SRichard Guy Briggs }
1235ad36d282SRichard Guy Briggs 
1236ad36d282SRichard Guy Briggs static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1237ad36d282SRichard Guy Briggs {
1238ad36d282SRichard Guy Briggs 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1239ad36d282SRichard Guy Briggs }
1240ad36d282SRichard Guy Briggs 
124152ee2dfdSOleg Nesterov static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
124252ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
12437af57294SPavel Emelyanov {
124452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
12457af57294SPavel Emelyanov }
12467af57294SPavel Emelyanov 
12477af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
12487af57294SPavel Emelyanov {
124952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
12507af57294SPavel Emelyanov }
12517af57294SPavel Emelyanov 
12527af57294SPavel Emelyanov 
125352ee2dfdSOleg Nesterov static inline pid_t task_session_nr_ns(struct task_struct *tsk,
125452ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
12557af57294SPavel Emelyanov {
125652ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
12577af57294SPavel Emelyanov }
12587af57294SPavel Emelyanov 
12597af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
12607af57294SPavel Emelyanov {
126152ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
12627af57294SPavel Emelyanov }
12637af57294SPavel Emelyanov 
12641b0f7ffdSOleg Nesterov /* obsolete, do not use */
12651b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
12661b0f7ffdSOleg Nesterov {
12671b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
12681b0f7ffdSOleg Nesterov }
12697af57294SPavel Emelyanov 
12701da177e4SLinus Torvalds /**
12711da177e4SLinus Torvalds  * pid_alive - check that a task structure is not stale
12721da177e4SLinus Torvalds  * @p: Task structure to be checked.
12731da177e4SLinus Torvalds  *
12741da177e4SLinus Torvalds  * Test if a process is not yet dead (at most zombie state).
12751da177e4SLinus Torvalds  * If pid_alive fails, then pointers within the task structure
12761da177e4SLinus Torvalds  * can be stale and must not be dereferenced.
1277e69f6186SYacine Belkadi  *
1278e69f6186SYacine Belkadi  * Return: 1 if the process is alive. 0 otherwise.
12791da177e4SLinus Torvalds  */
1280ad36d282SRichard Guy Briggs static inline int pid_alive(const struct task_struct *p)
12811da177e4SLinus Torvalds {
128292476d7fSEric W. Biederman 	return p->pids[PIDTYPE_PID].pid != NULL;
12831da177e4SLinus Torvalds }
12841da177e4SLinus Torvalds 
1285f400e198SSukadev Bhattiprolu /**
1286570f5241SSergey Senozhatsky  * is_global_init - check if a task structure is init. Since init
1287570f5241SSergey Senozhatsky  * is free to have sub-threads, we need to check tgid.
12883260259fSHenne  * @tsk: Task structure to be checked.
12893260259fSHenne  *
12903260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1291e69f6186SYacine Belkadi  *
1292e69f6186SYacine Belkadi  * Return: 1 if the task structure is init. 0 otherwise.
1293f400e198SSukadev Bhattiprolu  */
1294e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1295b461cc03SPavel Emelyanov {
1296570f5241SSergey Senozhatsky 	return task_tgid_nr(tsk) == 1;
1297b461cc03SPavel Emelyanov }
1298b460cbc5SSerge E. Hallyn 
12999ec52099SCedric Le Goater extern struct pid *cad_pid;
13009ec52099SCedric Le Goater 
13011da177e4SLinus Torvalds extern void free_task(struct task_struct *tsk);
13021da177e4SLinus Torvalds #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1303e56d0903SIngo Molnar 
1304158d9ebdSAndrew Morton extern void __put_task_struct(struct task_struct *t);
1305e56d0903SIngo Molnar 
1306e56d0903SIngo Molnar static inline void put_task_struct(struct task_struct *t)
1307e56d0903SIngo Molnar {
1308e56d0903SIngo Molnar 	if (atomic_dec_and_test(&t->usage))
13098c7904a0SEric W. Biederman 		__put_task_struct(t);
1310e56d0903SIngo Molnar }
13111da177e4SLinus Torvalds 
1312150593bfSOleg Nesterov struct task_struct *task_rcu_dereference(struct task_struct **ptask);
1313150593bfSOleg Nesterov struct task_struct *try_get_task_struct(struct task_struct **ptask);
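
/*
 * Editor's sketch: get_task_struct()/put_task_struct() bracket any use of
 * a task pointer that may outlive the context it was found in;
 * try_get_task_struct() is the variant for racy lookups ("some_task_ptr"
 * below is a hypothetical stand-in):
 *
 *	struct task_struct *t = try_get_task_struct(&some_task_ptr);
 *
 *	if (t) {
 *		... t cannot be freed until the matching put ...
 *		put_task_struct(t);
 *	}
 */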
1314150593bfSOleg Nesterov 
13156a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
13166a61671bSFrederic Weisbecker extern void task_cputime(struct task_struct *t,
13175613fda9SFrederic Weisbecker 			 u64 *utime, u64 *stime);
131816a6d9beSFrederic Weisbecker extern u64 task_gtime(struct task_struct *t);
13196a61671bSFrederic Weisbecker #else
13206fac4829SFrederic Weisbecker static inline void task_cputime(struct task_struct *t,
13215613fda9SFrederic Weisbecker 				u64 *utime, u64 *stime)
13226fac4829SFrederic Weisbecker {
13236fac4829SFrederic Weisbecker 	*utime = t->utime;
13246fac4829SFrederic Weisbecker 	*stime = t->stime;
13256fac4829SFrederic Weisbecker }
13266fac4829SFrederic Weisbecker 
132716a6d9beSFrederic Weisbecker static inline u64 task_gtime(struct task_struct *t)
13286a61671bSFrederic Weisbecker {
13296a61671bSFrederic Weisbecker 	return t->gtime;
13306a61671bSFrederic Weisbecker }
13316a61671bSFrederic Weisbecker #endif
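
/*
 * Editor's sketch: sampling a task's accumulated CPU time works the same
 * way whether or not CONFIG_VIRT_CPU_ACCOUNTING_GEN is set; in this
 * revision the values are nanosecond-resolution u64s:
 *
 *	u64 utime, stime;
 *
 *	task_cputime(t, &utime, &stime);
 */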
133240565b5aSStanislaw Gruszka 
133340565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1334b31dc66aSJens Axboe static inline void task_cputime_scaled(struct task_struct *t,
13355613fda9SFrederic Weisbecker 				       u64 *utimescaled,
13365613fda9SFrederic Weisbecker 				       u64 *stimescaled)
1337b31dc66aSJens Axboe {
13384db96cf0SAndi Kleen 	*utimescaled = t->utimescaled;
133961a87122SThomas Gleixner 	*stimescaled = t->stimescaled;
134058a69cb4STejun Heo }
134140565b5aSStanislaw Gruszka #else
134240565b5aSStanislaw Gruszka static inline void task_cputime_scaled(struct task_struct *t,
13435613fda9SFrederic Weisbecker 				       u64 *utimescaled,
13445613fda9SFrederic Weisbecker 				       u64 *stimescaled)
13451da177e4SLinus Torvalds {
134640565b5aSStanislaw Gruszka 	task_cputime(t, utimescaled, stimescaled);
13471da177e4SLinus Torvalds }
13481da177e4SLinus Torvalds #endif
134940565b5aSStanislaw Gruszka 
13505613fda9SFrederic Weisbecker extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
13515613fda9SFrederic Weisbecker extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
13521da177e4SLinus Torvalds 
13531da177e4SLinus Torvalds /*
13541da177e4SLinus Torvalds  * Per process flags
13551da177e4SLinus Torvalds  */
1356c1de45caSPeter Zijlstra #define PF_IDLE		0x00000002	/* I am an IDLE thread */
13571da177e4SLinus Torvalds #define PF_EXITING	0x00000004	/* getting shut down */
13581da177e4SLinus Torvalds #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
13591da177e4SLinus Torvalds #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
13601da177e4SLinus Torvalds #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
13611da177e4SLinus Torvalds #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
13621da177e4SLinus Torvalds #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
13631da177e4SLinus Torvalds #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
13641da177e4SLinus Torvalds #define PF_DUMPCORE	0x00000200	/* dumped core */
13651da177e4SLinus Torvalds #define PF_SIGNALED	0x00000400	/* killed by a signal */
13661da177e4SLinus Torvalds #define PF_MEMALLOC	0x00000800	/* Allocating memory */
13671da177e4SLinus Torvalds #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
13681da177e4SLinus Torvalds #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
13691da177e4SLinus Torvalds #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
13701da177e4SLinus Torvalds #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
13711da177e4SLinus Torvalds #define PF_FROZEN	0x00010000	/* frozen for system suspend */
13721da177e4SLinus Torvalds #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
13731da177e4SLinus Torvalds #define PF_KSWAPD	0x00040000	/* I am kswapd */
137421caf2fcSMing Lei #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
13751da177e4SLinus Torvalds #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
13761da177e4SLinus Torvalds #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
13771da177e4SLinus Torvalds #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
13781da177e4SLinus Torvalds #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
137914a40ffcSTejun Heo #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
13801da177e4SLinus Torvalds #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
13811da177e4SLinus Torvalds #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
13821da177e4SLinus Torvalds #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
13832b44c4dbSColin Cross #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
13841da177e4SLinus Torvalds 
13851da177e4SLinus Torvalds /*
13861da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
13871da177e4SLinus Torvalds  * tasks can access tsk->flags in read-only mode, for example
13881da177e4SLinus Torvalds  * with tsk_used_math (like during threaded core dumping).
13891da177e4SLinus Torvalds  * There is however an exception to this rule during ptrace
13901da177e4SLinus Torvalds  * or during fork: the ptracer task is allowed to write to the
13911da177e4SLinus Torvalds  * child->flags of its traced child (same goes for fork, the parent
13921da177e4SLinus Torvalds  * can write to the child->flags), because we're guaranteed the
13931da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
13941da177e4SLinus Torvalds  * at the same time the parent does it.
13951da177e4SLinus Torvalds  */
13961da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
13971da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
13981da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
13991da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
14001da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
14011da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
14021da177e4SLinus Torvalds #define conditional_used_math(condition) \
14031da177e4SLinus Torvalds 	conditional_stopped_child_used_math(condition, current)
14041da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
14051da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
14061da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
14071da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
14081da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
14091da177e4SLinus Torvalds 
14101d4457f9SKees Cook /* Per-process atomic flags. */
1411a2b86f77SZefan Li #define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
14122ad654bcSZefan Li #define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
14132ad654bcSZefan Li #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
141477ed2c57STetsuo Handa #define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */
14151d4457f9SKees Cook 
14161d4457f9SKees Cook 
1417e0e5070bSZefan Li #define TASK_PFA_TEST(name, func)					\
1418e0e5070bSZefan Li 	static inline bool task_##func(struct task_struct *p)		\
1419e0e5070bSZefan Li 	{ return test_bit(PFA_##name, &p->atomic_flags); }
1420e0e5070bSZefan Li #define TASK_PFA_SET(name, func)					\
1421e0e5070bSZefan Li 	static inline void task_set_##func(struct task_struct *p)	\
1422e0e5070bSZefan Li 	{ set_bit(PFA_##name, &p->atomic_flags); }
1423e0e5070bSZefan Li #define TASK_PFA_CLEAR(name, func)					\
1424e0e5070bSZefan Li 	static inline void task_clear_##func(struct task_struct *p)	\
1425e0e5070bSZefan Li 	{ clear_bit(PFA_##name, &p->atomic_flags); }
14261d4457f9SKees Cook 
1427e0e5070bSZefan Li TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1428e0e5070bSZefan Li TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
14291d4457f9SKees Cook 
14302ad654bcSZefan Li TASK_PFA_TEST(SPREAD_PAGE, spread_page)
14312ad654bcSZefan Li TASK_PFA_SET(SPREAD_PAGE, spread_page)
14322ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
14332ad654bcSZefan Li 
14342ad654bcSZefan Li TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
14352ad654bcSZefan Li TASK_PFA_SET(SPREAD_SLAB, spread_slab)
14362ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1437544b2c91STejun Heo 
143877ed2c57STetsuo Handa TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
143977ed2c57STetsuo Handa TASK_PFA_SET(LMK_WAITING, lmk_waiting)
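
/*
 * Editor's sketch: each TASK_PFA_* invocation above expands to a small
 * inline helper, so TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) yields
 * task_no_new_privs(), used like:
 *
 *	if (task_no_new_privs(current))
 *		... paths that would grant new privileges must refuse ...
 */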
144077ed2c57STetsuo Handa 
1441f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1442f41d911fSPaul E. McKenney {
14431da177e4SLinus Torvalds #ifdef CONFIG_PREEMPT_RCU
1444f41d911fSPaul E. McKenney 	p->rcu_read_lock_nesting = 0;
14451d082fd0SPaul E. McKenney 	p->rcu_read_unlock_special.s = 0;
1446dd5d19baSPaul E. McKenney 	p->rcu_blocked_node = NULL;
1447f41d911fSPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_node_entry);
14488315f422SPaul E. McKenney #endif /* #ifdef CONFIG_PREEMPT_RCU */
14498315f422SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
14508315f422SPaul E. McKenney 	p->rcu_tasks_holdout = false;
14518315f422SPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
1452176f8f7aSPaul E. McKenney 	p->rcu_tasks_idle_cpu = -1;
14538315f422SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RCU */
1454f41d911fSPaul E. McKenney }
1455f41d911fSPaul E. McKenney 
1456907aed48SMel Gorman static inline void tsk_restore_flags(struct task_struct *task,
1457907aed48SMel Gorman 				unsigned long orig_flags, unsigned long flags)
1458907aed48SMel Gorman {
1459907aed48SMel Gorman 	task->flags &= ~flags;
1460907aed48SMel Gorman 	task->flags |= orig_flags & flags;
1461907aed48SMel Gorman }
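
/*
 * Editor's sketch: the save/modify/restore pattern tsk_restore_flags()
 * supports, shown here with PF_MEMALLOC_NOIO (mirroring what the
 * memalloc_noio_save()/memalloc_noio_restore() helpers do):
 *
 *	unsigned long orig = current->flags;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	... allocations here will not recurse into I/O ...
 *	tsk_restore_flags(current, orig, PF_MEMALLOC_NOIO);
 */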
1462907aed48SMel Gorman 
1463f82f8042SJuri Lelli extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
1464f82f8042SJuri Lelli 				     const struct cpumask *trial);
14657f51412aSJuri Lelli extern int task_can_attach(struct task_struct *p,
14667f51412aSJuri Lelli 			   const struct cpumask *cs_cpus_allowed);
14671da177e4SLinus Torvalds #ifdef CONFIG_SMP
14681e1b6c51SKOSAKI Motohiro extern void do_set_cpus_allowed(struct task_struct *p,
14691e1b6c51SKOSAKI Motohiro 			       const struct cpumask *new_mask);
14701e1b6c51SKOSAKI Motohiro 
1471cd8ba7cdSMike Travis extern int set_cpus_allowed_ptr(struct task_struct *p,
147296f874e2SRusty Russell 				const struct cpumask *new_mask);
14731da177e4SLinus Torvalds #else
14741e1b6c51SKOSAKI Motohiro static inline void do_set_cpus_allowed(struct task_struct *p,
14751e1b6c51SKOSAKI Motohiro 				      const struct cpumask *new_mask)
14761e1b6c51SKOSAKI Motohiro {
14771e1b6c51SKOSAKI Motohiro }
1478cd8ba7cdSMike Travis static inline int set_cpus_allowed_ptr(struct task_struct *p,
147996f874e2SRusty Russell 				       const struct cpumask *new_mask)
14801da177e4SLinus Torvalds {
148196f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
14821da177e4SLinus Torvalds 		return -EINVAL;
14831da177e4SLinus Torvalds 	return 0;
14841da177e4SLinus Torvalds }
14851da177e4SLinus Torvalds #endif
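
/*
 * Editor's sketch: pinning a task to a single CPU; on UP builds the stub
 * above only accepts masks containing CPU 0:
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)))
 *		... the mask could not be applied, handle the error ...
 */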
1486e0ad9556SRusty Russell 
14876d0d2878SChristian Borntraeger #ifndef cpu_relax_yield
14886d0d2878SChristian Borntraeger #define cpu_relax_yield() cpu_relax()
14896d0d2878SChristian Borntraeger #endif
14906d0d2878SChristian Borntraeger 
149136c8b586SIngo Molnar extern unsigned long long
149241b86e9cSIngo Molnar task_sched_runtime(struct task_struct *task);
14931da177e4SLinus Torvalds 
14941da177e4SLinus Torvalds /* sched_exec is called by processes performing an exec */
14951da177e4SLinus Torvalds #ifdef CONFIG_SMP
14961da177e4SLinus Torvalds extern void sched_exec(void);
14971da177e4SLinus Torvalds #else
14981da177e4SLinus Torvalds #define sched_exec()   {}
14991da177e4SLinus Torvalds #endif
15001da177e4SLinus Torvalds 
15011da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
15021da177e4SLinus Torvalds extern void idle_task_exit(void);
15031da177e4SLinus Torvalds #else
15041da177e4SLinus Torvalds static inline void idle_task_exit(void) {}
15051da177e4SLinus Torvalds #endif
15061da177e4SLinus Torvalds 
1507fa93384fSDan Carpenter extern int yield_to(struct task_struct *p, bool preempt);
150836c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
150936c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
1510d0ea0268SDongsheng Yang /**
1511d0ea0268SDongsheng Yang  * task_nice - return the nice value of a given task.
1512d0ea0268SDongsheng Yang  * @p: the task in question.
1513d0ea0268SDongsheng Yang  *
1514d0ea0268SDongsheng Yang  * Return: The nice value [ -20 ... 0 ... 19 ].
1515d0ea0268SDongsheng Yang  */
1516d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p)
1517d0ea0268SDongsheng Yang {
1518d0ea0268SDongsheng Yang 	return PRIO_TO_NICE((p)->static_prio);
1519d0ea0268SDongsheng Yang }
152036c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
152136c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
15221da177e4SLinus Torvalds extern int idle_cpu(int cpu);
1523fe7de49fSKOSAKI Motohiro extern int sched_setscheduler(struct task_struct *, int,
1524fe7de49fSKOSAKI Motohiro 			      const struct sched_param *);
1525961ccdddSRusty Russell extern int sched_setscheduler_nocheck(struct task_struct *, int,
1526fe7de49fSKOSAKI Motohiro 				      const struct sched_param *);
1527d50dde5aSDario Faggioli extern int sched_setattr(struct task_struct *,
1528d50dde5aSDario Faggioli 			 const struct sched_attr *);
152936c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
1530c4f30608SPaul E. McKenney /**
1531c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
1532fa757281SRandy Dunlap  * @p: the task in question.
1533e69f6186SYacine Belkadi  *
1534e69f6186SYacine Belkadi  * Return: 1 if @p is an idle task. 0 otherwise.
1535c4f30608SPaul E. McKenney  */
15367061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
1537c4f30608SPaul E. McKenney {
1538c1de45caSPeter Zijlstra 	return !!(p->flags & PF_IDLE);
1539c4f30608SPaul E. McKenney }
154036c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
1541a458ae2eSPeter Zijlstra extern void ia64_set_curr_task(int cpu, struct task_struct *p);
15421da177e4SLinus Torvalds 
15431da177e4SLinus Torvalds void yield(void);
15441da177e4SLinus Torvalds 
15451da177e4SLinus Torvalds union thread_union {
1546c65eacbeSAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK
15471da177e4SLinus Torvalds 	struct thread_info thread_info;
1548c65eacbeSAndy Lutomirski #endif
15491da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
15501da177e4SLinus Torvalds };
15511da177e4SLinus Torvalds 
15521da177e4SLinus Torvalds #ifndef __HAVE_ARCH_KSTACK_END
15531da177e4SLinus Torvalds static inline int kstack_end(void *addr)
15541da177e4SLinus Torvalds {
15551da177e4SLinus Torvalds 	/* Reliable end of stack detection:
15571da177e4SLinus Torvalds 	 * Some APM BIOS versions misalign the stack
15571da177e4SLinus Torvalds 	 */
15581da177e4SLinus Torvalds 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
15591da177e4SLinus Torvalds }
15601da177e4SLinus Torvalds #endif
15611da177e4SLinus Torvalds 
15621da177e4SLinus Torvalds extern union thread_union init_thread_union;
15631da177e4SLinus Torvalds extern struct task_struct init_task;
15641da177e4SLinus Torvalds 
1565198fe21bSPavel Emelyanov extern struct pid_namespace init_pid_ns;
1566198fe21bSPavel Emelyanov 
1567198fe21bSPavel Emelyanov /*
1568198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
1569198fe21bSPavel Emelyanov  *
1570198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
1571198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
1572228ebcbeSPavel Emelyanov  * find_task_by_vpid():
1573228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
1574198fe21bSPavel Emelyanov  *
1575e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
1576198fe21bSPavel Emelyanov  */
1577198fe21bSPavel Emelyanov 
1578228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
1579228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1580228ebcbeSPavel Emelyanov 		struct pid_namespace *ns);
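
/*
 * Editor's sketch: these lookups return an unreferenced task, so they must
 * run under rcu_read_lock() (or tasklist_lock), and the result must be
 * pinned before the lock is dropped:
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 */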
1581198fe21bSPavel Emelyanov 
1582b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1583b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
15843e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
15851da177e4SLinus Torvalds #ifdef CONFIG_SMP
15861da177e4SLinus Torvalds  extern void kick_process(struct task_struct *tsk);
15871da177e4SLinus Torvalds #else
15881da177e4SLinus Torvalds  static inline void kick_process(struct task_struct *tsk) { }
15891da177e4SLinus Torvalds #endif
1590aab03e05SDario Faggioli extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
1591ad46c2c4SIngo Molnar extern void sched_dead(struct task_struct *p);
15921da177e4SLinus Torvalds 
15931da177e4SLinus Torvalds extern void proc_caches_init(void);
15949a13049eSOleg Nesterov 
15951da177e4SLinus Torvalds extern void release_task(struct task_struct * p);
15965a1b98d3SAl Viro 
15973033f14aSJosh Triplett #ifdef CONFIG_HAVE_COPY_THREAD_TLS
15983033f14aSJosh Triplett extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
15993033f14aSJosh Triplett 			struct task_struct *, unsigned long);
16003033f14aSJosh Triplett #else
16016f2c55b8SAlexey Dobriyan extern int copy_thread(unsigned long, unsigned long, unsigned long,
1602afa86fc4SAl Viro 			struct task_struct *);
16033033f14aSJosh Triplett 
16043033f14aSJosh Triplett /* Architectures that haven't opted into copy_thread_tls get the tls argument
16053033f14aSJosh Triplett  * via pt_regs, so ignore the tls argument passed via C. */
16063033f14aSJosh Triplett static inline int copy_thread_tls(
16073033f14aSJosh Triplett 		unsigned long clone_flags, unsigned long sp, unsigned long arg,
16083033f14aSJosh Triplett 		struct task_struct *p, unsigned long tls)
16093033f14aSJosh Triplett {
16103033f14aSJosh Triplett 	return copy_thread(clone_flags, sp, arg, p);
16113033f14aSJosh Triplett }
16123033f14aSJosh Triplett #endif
16131da177e4SLinus Torvalds extern void flush_thread(void);
16145f56a5dfSJiri Slaby 
16155f56a5dfSJiri Slaby #ifdef CONFIG_HAVE_EXIT_THREAD
1616e6464694SJiri Slaby extern void exit_thread(struct task_struct *tsk);
16175f56a5dfSJiri Slaby #else
1618e6464694SJiri Slaby static inline void exit_thread(struct task_struct *tsk)
16195f56a5dfSJiri Slaby {
16205f56a5dfSJiri Slaby }
16215f56a5dfSJiri Slaby #endif
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds extern void exit_files(struct task_struct *);
1624cbaffba1SOleg Nesterov 
16251da177e4SLinus Torvalds extern void exit_itimers(struct signal_struct *);
16261da177e4SLinus Torvalds 
16279402c95fSJoe Perches extern void do_group_exit(int);
16281da177e4SLinus Torvalds 
1629c4ad8f98SLinus Torvalds extern int do_execve(struct filename *,
1630d7627467SDavid Howells 		     const char __user * const __user *,
1631da3d4c5fSAl Viro 		     const char __user * const __user *);
163251f39a1fSDavid Drysdale extern int do_execveat(int, struct filename *,
163351f39a1fSDavid Drysdale 		       const char __user * const __user *,
163451f39a1fSDavid Drysdale 		       const char __user * const __user *,
163551f39a1fSDavid Drysdale 		       int);
16363033f14aSJosh Triplett extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
1637e80d6661SAl Viro extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
163836c8b586SIngo Molnar struct task_struct *fork_idle(int);
16392aa3a7f8SAl Viro extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
16401da177e4SLinus Torvalds 
164182b89778SAdrian Hunter extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
164282b89778SAdrian Hunter static inline void set_task_comm(struct task_struct *tsk, const char *from)
164382b89778SAdrian Hunter {
164482b89778SAdrian Hunter 	__set_task_comm(tsk, from, false);
164582b89778SAdrian Hunter }
164659714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
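
/*
 * Editor's sketch: get_task_comm() copies under task_lock(), so callers
 * only need a TASK_COMM_LEN-sized buffer (TASK_COMM_LEN is defined
 * earlier in this header):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 */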
16471da177e4SLinus Torvalds 
16481da177e4SLinus Torvalds #ifdef CONFIG_SMP
1649317f3941SPeter Zijlstra void scheduler_ipi(void);
165085ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
16511da177e4SLinus Torvalds #else
1652184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
165385ba2d86SRoland McGrath static inline unsigned long wait_task_inactive(struct task_struct *p,
165485ba2d86SRoland McGrath 					       long match_state)
165585ba2d86SRoland McGrath {
165685ba2d86SRoland McGrath 	return 1;
165785ba2d86SRoland McGrath }
16581da177e4SLinus Torvalds #endif
16591da177e4SLinus Torvalds 
16601da177e4SLinus Torvalds /*
1661260ea101SEric W. Biederman  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
166222e2c507SJens Axboe  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
1663ddbcc7e8SPaul Menage  * pins the final release of task.io_context.  Also protects ->cpuset and
1664d68b46feSOleg Nesterov  * ->cgroup.subsys[]. And ->vfork_done.
16651da177e4SLinus Torvalds  *
16661da177e4SLinus Torvalds  * Nests both inside and outside of read_lock(&tasklist_lock).
16671da177e4SLinus Torvalds  * It must not be nested with write_lock_irq(&tasklist_lock),
16681da177e4SLinus Torvalds  * neither inside nor outside.
16691da177e4SLinus Torvalds  */
16701da177e4SLinus Torvalds static inline void task_lock(struct task_struct *p)
16711da177e4SLinus Torvalds {
16721da177e4SLinus Torvalds 	spin_lock(&p->alloc_lock);
16731da177e4SLinus Torvalds }
16741da177e4SLinus Torvalds 
16751da177e4SLinus Torvalds static inline void task_unlock(struct task_struct *p)
16761da177e4SLinus Torvalds {
16771da177e4SLinus Torvalds 	spin_unlock(&p->alloc_lock);
16781da177e4SLinus Torvalds }
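
/*
 * Editor's sketch: per the locking comment above, task_lock() stabilizes
 * fields such as ->comm, ->fs and ->files against a concurrent exec/exit:
 *
 *	task_lock(p);
 *	... p->comm, p->fs, p->files are stable here ...
 *	task_unlock(p);
 */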
16791da177e4SLinus Torvalds 
1680c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
1681c65eacbeSAndy Lutomirski 
1682c65eacbeSAndy Lutomirski static inline struct thread_info *task_thread_info(struct task_struct *task)
1683c65eacbeSAndy Lutomirski {
1684c65eacbeSAndy Lutomirski 	return &task->thread_info;
1685c65eacbeSAndy Lutomirski }
1686c6c314a6SAndy Lutomirski 
1687c6c314a6SAndy Lutomirski /*
1688c6c314a6SAndy Lutomirski  * When accessing the stack of a non-current task that might exit, use
1689c6c314a6SAndy Lutomirski  * try_get_task_stack() instead.  task_stack_page will return a pointer
1690c6c314a6SAndy Lutomirski  * that could get freed out from under you.
1691c6c314a6SAndy Lutomirski  */
1692c65eacbeSAndy Lutomirski static inline void *task_stack_page(const struct task_struct *task)
1693c65eacbeSAndy Lutomirski {
1694c65eacbeSAndy Lutomirski 	return task->stack;
1695c65eacbeSAndy Lutomirski }
1696c6c314a6SAndy Lutomirski 
1697c65eacbeSAndy Lutomirski #define setup_thread_stack(new,old)	do { } while(0)
1698c6c314a6SAndy Lutomirski 
1699c65eacbeSAndy Lutomirski static inline unsigned long *end_of_stack(const struct task_struct *task)
1700c65eacbeSAndy Lutomirski {
1701c65eacbeSAndy Lutomirski 	return task->stack;
1702c65eacbeSAndy Lutomirski }
1703c65eacbeSAndy Lutomirski 
1704c65eacbeSAndy Lutomirski #elif !defined(__HAVE_THREAD_FUNCTIONS)
1705f037360fSAl Viro 
1706f7e4217bSRoman Zippel #define task_thread_info(task)	((struct thread_info *)(task)->stack)
1707c65eacbeSAndy Lutomirski #define task_stack_page(task)	((void *)(task)->stack)
1708a1261f54SAl Viro 
170910ebffdeSAl Viro static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
171010ebffdeSAl Viro {
171110ebffdeSAl Viro 	*task_thread_info(p) = *task_thread_info(org);
171210ebffdeSAl Viro 	task_thread_info(p)->task = p;
171310ebffdeSAl Viro }
171410ebffdeSAl Viro 
17156a40281aSChuck Ebbert /*
17166a40281aSChuck Ebbert  * Return the address of the last usable long on the stack.
17176a40281aSChuck Ebbert  *
17186a40281aSChuck Ebbert  * When the stack grows down, this is just above the thread
17196a40281aSChuck Ebbert  * info struct. Going any lower will corrupt the threadinfo.
17206a40281aSChuck Ebbert  *
17216a40281aSChuck Ebbert  * When the stack grows up, this is the highest address.
17226a40281aSChuck Ebbert  * Beyond that position, we corrupt data on the next page.
17236a40281aSChuck Ebbert  */
172410ebffdeSAl Viro static inline unsigned long *end_of_stack(struct task_struct *p)
172510ebffdeSAl Viro {
17266a40281aSChuck Ebbert #ifdef CONFIG_STACK_GROWSUP
17276a40281aSChuck Ebbert 	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
17286a40281aSChuck Ebbert #else
1729f7e4217bSRoman Zippel 	return (unsigned long *)(task_thread_info(p) + 1);
17306a40281aSChuck Ebbert #endif
173110ebffdeSAl Viro }
173210ebffdeSAl Viro 
1733f037360fSAl Viro #endif
1734c6c314a6SAndy Lutomirski 
173568f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
173668f24b08SAndy Lutomirski static inline void *try_get_task_stack(struct task_struct *tsk)
173768f24b08SAndy Lutomirski {
173868f24b08SAndy Lutomirski 	return atomic_inc_not_zero(&tsk->stack_refcount) ?
173968f24b08SAndy Lutomirski 		task_stack_page(tsk) : NULL;
174068f24b08SAndy Lutomirski }
174168f24b08SAndy Lutomirski 
174268f24b08SAndy Lutomirski extern void put_task_stack(struct task_struct *tsk);
174368f24b08SAndy Lutomirski #else
1744c6c314a6SAndy Lutomirski static inline void *try_get_task_stack(struct task_struct *tsk)
1745c6c314a6SAndy Lutomirski {
1746c6c314a6SAndy Lutomirski 	return task_stack_page(tsk);
1747c6c314a6SAndy Lutomirski }
1748c6c314a6SAndy Lutomirski 
1749c6c314a6SAndy Lutomirski static inline void put_task_stack(struct task_struct *tsk) {}
175068f24b08SAndy Lutomirski #endif
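
/*
 * Editor's sketch: inspecting another task's stack with the refcounted
 * accessors, so the stack cannot be freed mid-walk:
 *
 *	void *stack = try_get_task_stack(tsk);
 *
 *	if (stack) {
 *		... walk the stack pages ...
 *		put_task_stack(tsk);
 *	}
 */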
1751c6c314a6SAndy Lutomirski 
1752a70857e4SAaron Tomlin #define task_stack_end_corrupted(task) \
1753a70857e4SAaron Tomlin 		(*(end_of_stack(task)) != STACK_END_MAGIC)
1754f037360fSAl Viro 
17558b05c7e6SFUJITA Tomonori static inline int object_is_on_stack(void *obj)
17568b05c7e6SFUJITA Tomonori {
17578b05c7e6SFUJITA Tomonori 	void *stack = task_stack_page(current);
17588b05c7e6SFUJITA Tomonori 
17598b05c7e6SFUJITA Tomonori 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
17608b05c7e6SFUJITA Tomonori }
17618b05c7e6SFUJITA Tomonori 
1762b235beeaSLinus Torvalds extern void thread_stack_cache_init(void);
17638c9843e5SBenjamin Herrenschmidt 
17647c9f8861SEric Sandeen #ifdef CONFIG_DEBUG_STACK_USAGE
17657c9f8861SEric Sandeen static inline unsigned long stack_not_used(struct task_struct *p)
17667c9f8861SEric Sandeen {
17677c9f8861SEric Sandeen 	unsigned long *n = end_of_stack(p);
17687c9f8861SEric Sandeen 
17697c9f8861SEric Sandeen 	do { 	/* Skip over canary */
17706c31da34SHelge Deller # ifdef CONFIG_STACK_GROWSUP
17716c31da34SHelge Deller 		n--;
17726c31da34SHelge Deller # else
17737c9f8861SEric Sandeen 		n++;
17746c31da34SHelge Deller # endif
17757c9f8861SEric Sandeen 	} while (!*n);
17767c9f8861SEric Sandeen 
17776c31da34SHelge Deller # ifdef CONFIG_STACK_GROWSUP
17786c31da34SHelge Deller 	return (unsigned long)end_of_stack(p) - (unsigned long)n;
17796c31da34SHelge Deller # else
17807c9f8861SEric Sandeen 	return (unsigned long)n - (unsigned long)end_of_stack(p);
17816c31da34SHelge Deller # endif
17827c9f8861SEric Sandeen }
17837c9f8861SEric Sandeen #endif
1784d4311ff1SAaron Tomlin extern void set_task_stack_end_magic(struct task_struct *tsk);
17857c9f8861SEric Sandeen 
17861da177e4SLinus Torvalds /* Set thread flags in another task's structure
17871da177e4SLinus Torvalds  * - see asm/thread_info.h for the TIF_xxxx flags available
17881da177e4SLinus Torvalds  */
17891da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
17901da177e4SLinus Torvalds {
1791a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
17921da177e4SLinus Torvalds }
17931da177e4SLinus Torvalds 
17941da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
17951da177e4SLinus Torvalds {
1796a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
17971da177e4SLinus Torvalds }
17981da177e4SLinus Torvalds 
17991da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
18001da177e4SLinus Torvalds {
1801a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
18021da177e4SLinus Torvalds }
18031da177e4SLinus Torvalds 
18041da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
18051da177e4SLinus Torvalds {
1806a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
18071da177e4SLinus Torvalds }
18081da177e4SLinus Torvalds 
18091da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
18101da177e4SLinus Torvalds {
1811a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
18121da177e4SLinus Torvalds }
18131da177e4SLinus Torvalds 
18141da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
18151da177e4SLinus Torvalds {
18161da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
18171da177e4SLinus Torvalds }
18181da177e4SLinus Torvalds 
18191da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
18201da177e4SLinus Torvalds {
18211da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
18221da177e4SLinus Torvalds }
18231da177e4SLinus Torvalds 
18248ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
18258ae121acSGregory Haskins {
18268ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
18278ae121acSGregory Haskins }
18288ae121acSGregory Haskins 
18291da177e4SLinus Torvalds /*
18301da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
18311da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
18321da177e4SLinus Torvalds  * value indicates whether a reschedule was done in fact.
18331da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling,
18341da177e4SLinus Torvalds  * cond_resched_softirq() will enable bhs before scheduling.
18351da177e4SLinus Torvalds  */
183635a773a0SPeter Zijlstra #ifndef CONFIG_PREEMPT
1837c3921ab7SLinus Torvalds extern int _cond_resched(void);
183835a773a0SPeter Zijlstra #else
183935a773a0SPeter Zijlstra static inline int _cond_resched(void) { return 0; }
184035a773a0SPeter Zijlstra #endif
18416f80bd98SFrederic Weisbecker 
1842613afbf8SFrederic Weisbecker #define cond_resched() ({			\
18433427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, 0);	\
1844613afbf8SFrederic Weisbecker 	_cond_resched();			\
1845613afbf8SFrederic Weisbecker })
18466f80bd98SFrederic Weisbecker 
1847613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
1848613afbf8SFrederic Weisbecker 
1849613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
18503427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1851613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
1852613afbf8SFrederic Weisbecker })
1853613afbf8SFrederic Weisbecker 
1854613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
1855613afbf8SFrederic Weisbecker 
1856613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
18573427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
1858613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
1859613afbf8SFrederic Weisbecker })
18601da177e4SLinus Torvalds 
1861f6f3c437SSimon Horman static inline void cond_resched_rcu(void)
1862f6f3c437SSimon Horman {
1863f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1864f6f3c437SSimon Horman 	rcu_read_unlock();
1865f6f3c437SSimon Horman 	cond_resched();
1866f6f3c437SSimon Horman 	rcu_read_lock();
1867f6f3c437SSimon Horman #endif
1868f6f3c437SSimon Horman }
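
/*
 * Editor's sketch: breaking up a long-running kernel loop so it does not
 * monopolize the CPU on non-preemptible kernels (process() and items[]
 * are hypothetical stand-ins):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process(items[i]);
 *		cond_resched();
 *	}
 */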
1869f6f3c437SSimon Horman 
18701da177e4SLinus Torvalds /*
18711da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
187295c354feSNick Piggin  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
187395c354feSNick Piggin  * but it is a general need for low latency.)
18741da177e4SLinus Torvalds  */
187595c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
18761da177e4SLinus Torvalds {
187795c354feSNick Piggin #ifdef CONFIG_PREEMPT
187895c354feSNick Piggin 	return spin_is_contended(lock);
187995c354feSNick Piggin #else
18801da177e4SLinus Torvalds 	return 0;
188195c354feSNick Piggin #endif
18821da177e4SLinus Torvalds }
18831da177e4SLinus Torvalds 
188475f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
188575f93fedSPeter Zijlstra {
188675f93fedSPeter Zijlstra 	return unlikely(tif_need_resched());
188775f93fedSPeter Zijlstra }
188875f93fedSPeter Zijlstra 
1889ee761f62SThomas Gleixner /*
1890f06febc9SFrank Mayhar  * Thread group CPU time accounting.
1891f06febc9SFrank Mayhar  */
18924cd4c1b4SPeter Zijlstra void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
18934da94d49SPeter Zijlstra void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
1894f06febc9SFrank Mayhar 
1895f06febc9SFrank Mayhar /*
18961da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
18971da177e4SLinus Torvalds  */
18981da177e4SLinus Torvalds #ifdef CONFIG_SMP
18991da177e4SLinus Torvalds 
19001da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
19011da177e4SLinus Torvalds {
1902c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
1903c65eacbeSAndy Lutomirski 	return p->cpu;
1904c65eacbeSAndy Lutomirski #else
1905a1261f54SAl Viro 	return task_thread_info(p)->cpu;
1906c65eacbeSAndy Lutomirski #endif
19071da177e4SLinus Torvalds }
19081da177e4SLinus Torvalds 
1909b32e86b4SIngo Molnar static inline int task_node(const struct task_struct *p)
1910b32e86b4SIngo Molnar {
1911b32e86b4SIngo Molnar 	return cpu_to_node(task_cpu(p));
1912b32e86b4SIngo Molnar }
1913b32e86b4SIngo Molnar 
1914c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
19151da177e4SLinus Torvalds 
19161da177e4SLinus Torvalds #else
19171da177e4SLinus Torvalds 
19181da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
19191da177e4SLinus Torvalds {
19201da177e4SLinus Torvalds 	return 0;
19211da177e4SLinus Torvalds }
19221da177e4SLinus Torvalds 
19231da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
19241da177e4SLinus Torvalds {
19251da177e4SLinus Torvalds }
19261da177e4SLinus Torvalds 
19271da177e4SLinus Torvalds #endif /* CONFIG_SMP */
19281da177e4SLinus Torvalds 
1929d9345c65SPan Xinhui /*
1930d9345c65SPan Xinhui  * In order to reduce various lock holder preemption latencies provide an
1931d9345c65SPan Xinhui  * interface to see if a vCPU is currently running or not.
1932d9345c65SPan Xinhui  *
1933d9345c65SPan Xinhui  * This allows us to terminate optimistic spin loops and block, analogous to
1934d9345c65SPan Xinhui  * the native optimistic spin heuristic of testing if the lock owner task is
1935d9345c65SPan Xinhui  * running or not.
1936d9345c65SPan Xinhui  */
1937d9345c65SPan Xinhui #ifndef vcpu_is_preempted
1938d9345c65SPan Xinhui # define vcpu_is_preempted(cpu)	false
1939d9345c65SPan Xinhui #endif
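
/*
 * Editor's sketch: how an optimistic spin loop consults this hook,
 * modelled loosely on the mutex owner-spinning code:
 *
 *	while (owner->on_cpu && !vcpu_is_preempted(task_cpu(owner))) {
 *		if (need_resched())
 *			break;
 *		cpu_relax();
 *	}
 */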
1940d9345c65SPan Xinhui 
194196f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
194296f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
19435c45bf27SSiddha, Suresh B 
19447c941438SDhaval Giani #ifdef CONFIG_CGROUP_SCHED
194507e06b01SYong Zhang extern struct task_group root_task_group;
19468323f26cSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */
19479b5b7751SSrivatsa Vaddagiri 
194854e99124SDhaval Giani extern int task_can_switch_user(struct user_struct *up,
194954e99124SDhaval Giani 					struct task_struct *tsk);
195054e99124SDhaval Giani 
195182455257SDave Hansen #ifndef TASK_SIZE_OF
195282455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
195382455257SDave Hansen #endif
195482455257SDave Hansen 
19551da177e4SLinus Torvalds #endif
1956