xref: /linux/include/linux/sched.h (revision 7f5f8e8d97d77edf33f2836259d1f19c6f4d94f5)
11da177e4SLinus Torvalds #ifndef _LINUX_SCHED_H
21da177e4SLinus Torvalds #define _LINUX_SCHED_H
31da177e4SLinus Torvalds 
4607ca46eSDavid Howells #include <uapi/linux/sched.h>
5b7b3c76aSDavid Woodhouse 
65c228079SDongsheng Yang #include <linux/sched/prio.h>
7ee6a3d19SIngo Molnar #include <linux/nodemask.h>
85c228079SDongsheng Yang 
9b69339baSIngo Molnar #include <linux/mutex.h>
10fb00aca4SPeter Zijlstra #include <linux/plist.h>
1177ba809eSIngo Molnar #include <linux/mm_types_task.h>
121da177e4SLinus Torvalds 
131da177e4SLinus Torvalds #include <linux/sem.h>
14ab602f79SJack Miller #include <linux/shm.h>
15f361bf4aSIngo Molnar #include <linux/signal_types.h>
161da177e4SLinus Torvalds #include <linux/pid.h>
171da177e4SLinus Torvalds #include <linux/seccomp.h>
18b68070e1SIngo Molnar #include <linux/rcupdate.h>
191da177e4SLinus Torvalds 
20a3b6714eSDavid Woodhouse #include <linux/resource.h>
21a3b6714eSDavid Woodhouse #include <linux/hrtimer.h>
225c9a8750SDmitry Vyukov #include <linux/kcov.h>
237c3ab738SAndrew Morton #include <linux/task_io_accounting.h>
249745512cSArjan van de Ven #include <linux/latencytop.h>
25a3b6714eSDavid Woodhouse 
2670b8157eSIngo Molnar #include <asm/current.h>
2770b8157eSIngo Molnar 
28c7af7877SIngo Molnar /* task_struct member predeclarations: */
29c7af7877SIngo Molnar struct audit_context;
30c7af7877SIngo Molnar struct autogroup;
31c7af7877SIngo Molnar struct backing_dev_info;
32c7af7877SIngo Molnar struct bio_list;
33c7af7877SIngo Molnar struct blk_plug;
34c7af7877SIngo Molnar struct cfs_rq;
35c7af7877SIngo Molnar struct filename;
36c7af7877SIngo Molnar struct fs_struct;
37c7af7877SIngo Molnar struct futex_pi_state;
38c7af7877SIngo Molnar struct io_context;
39c7af7877SIngo Molnar struct mempolicy;
40c7af7877SIngo Molnar struct nameidata;
41c7af7877SIngo Molnar struct nsproxy;
42c7af7877SIngo Molnar struct perf_event_context;
43c7af7877SIngo Molnar struct pid_namespace;
44c7af7877SIngo Molnar struct pipe_inode_info;
45c7af7877SIngo Molnar struct rcu_node;
46c7af7877SIngo Molnar struct reclaim_state;
47c7af7877SIngo Molnar struct robust_list_head;
48e2d1e2aeSIngo Molnar struct sched_attr;
49e2d1e2aeSIngo Molnar struct sched_param;
5043ae34cbSIngo Molnar struct seq_file;
51c7af7877SIngo Molnar struct sighand_struct;
52c7af7877SIngo Molnar struct signal_struct;
53c7af7877SIngo Molnar struct task_delay_info;
544cf86d77SIngo Molnar struct task_group;
55c7af7877SIngo Molnar struct task_struct;
56c7af7877SIngo Molnar struct uts_namespace;
571da177e4SLinus Torvalds 
584a8342d2SLinus Torvalds /*
594a8342d2SLinus Torvalds  * Task state bitmask. NOTE! These bits are also
604a8342d2SLinus Torvalds  * encoded in fs/proc/array.c: get_task_state().
614a8342d2SLinus Torvalds  *
624a8342d2SLinus Torvalds  * We have two separate sets of flags: task->state
634a8342d2SLinus Torvalds  * is about runnability, while task->exit_state is
644a8342d2SLinus Torvalds  * about the task exiting. Confusing, but this way
654a8342d2SLinus Torvalds  * modifying one set can't modify the other one by
664a8342d2SLinus Torvalds  * mistake.
674a8342d2SLinus Torvalds  */
681da177e4SLinus Torvalds #define TASK_RUNNING		0
691da177e4SLinus Torvalds #define TASK_INTERRUPTIBLE	1
701da177e4SLinus Torvalds #define TASK_UNINTERRUPTIBLE	2
71f021a3c2SMatthew Wilcox #define __TASK_STOPPED		4
72f021a3c2SMatthew Wilcox #define __TASK_TRACED		8
734a8342d2SLinus Torvalds /* in tsk->exit_state */
74ad86622bSOleg Nesterov #define EXIT_DEAD		16
75ad86622bSOleg Nesterov #define EXIT_ZOMBIE		32
76abd50b39SOleg Nesterov #define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
774a8342d2SLinus Torvalds /* in tsk->state again */
78af927232SMike Galbraith #define TASK_DEAD		64
79f021a3c2SMatthew Wilcox #define TASK_WAKEKILL		128
80e9c84311SPeter Zijlstra #define TASK_WAKING		256
81f2530dc7SThomas Gleixner #define TASK_PARKED		512
8280ed87c8SPeter Zijlstra #define TASK_NOLOAD		1024
837dc603c9SPeter Zijlstra #define TASK_NEW		2048
847dc603c9SPeter Zijlstra #define TASK_STATE_MAX		4096
85f021a3c2SMatthew Wilcox 
867dc603c9SPeter Zijlstra #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
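/*
 * One character per state bit, in the order of the definitions above:
 * R(unning), S(leeping), D (uninterruptible), sTopped, t(raced),
 * X (EXIT_DEAD), Z(ombie), x (TASK_DEAD), K (wakekill), W(aking),
 * P(arked), N(oload), n(ew).
 */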
8773342151SPeter Zijlstra 
88642fa448SDavidlohr Bueso /* Convenience macros for the sake of set_current_state */
89f021a3c2SMatthew Wilcox #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
90f021a3c2SMatthew Wilcox #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
91f021a3c2SMatthew Wilcox #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
921da177e4SLinus Torvalds 
9380ed87c8SPeter Zijlstra #define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
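/*
 * E.g. TASK_KILLABLE sleeps like TASK_UNINTERRUPTIBLE but still reacts
 * to fatal signals, while TASK_IDLE sleeps uninterruptibly without
 * contributing to the load average.
 */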
9480ed87c8SPeter Zijlstra 
9592a1f4bcSMatthew Wilcox /* Convenience macros for the sake of wake_up */
9692a1f4bcSMatthew Wilcox #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
97f021a3c2SMatthew Wilcox #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
9892a1f4bcSMatthew Wilcox 
9992a1f4bcSMatthew Wilcox /* get_task_state() */
10092a1f4bcSMatthew Wilcox #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
101f021a3c2SMatthew Wilcox 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
10274e37200SOleg Nesterov 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
10392a1f4bcSMatthew Wilcox 
104f021a3c2SMatthew Wilcox #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
105f021a3c2SMatthew Wilcox #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
10692a1f4bcSMatthew Wilcox #define task_is_stopped_or_traced(task)	\
107f021a3c2SMatthew Wilcox 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
10892a1f4bcSMatthew Wilcox #define task_contributes_to_load(task)	\
109e3c8ca83SNathan Lynch 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
11080ed87c8SPeter Zijlstra 				 (task->flags & PF_FROZEN) == 0 && \
11180ed87c8SPeter Zijlstra 				 (task->state & TASK_NOLOAD) == 0)
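/*
 * A minimal usage sketch (not part of this header): checking another
 * task's state from a ptrace-style helper might look like
 *
 *	if (task_is_stopped_or_traced(child))
 *		...child will not run until it is resumed...
 *
 * These predicates only test ->state bits; the caller still needs
 * tasklist or RCU protection to keep the task_struct itself alive.
 */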
1121da177e4SLinus Torvalds 
1138eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1148eb23b9fSPeter Zijlstra 
1158eb23b9fSPeter Zijlstra #define __set_current_state(state_value)			\
1168eb23b9fSPeter Zijlstra 	do {							\
1178eb23b9fSPeter Zijlstra 		current->task_state_change = _THIS_IP_;		\
1188eb23b9fSPeter Zijlstra 		current->state = (state_value);			\
1198eb23b9fSPeter Zijlstra 	} while (0)
1208eb23b9fSPeter Zijlstra #define set_current_state(state_value)				\
1218eb23b9fSPeter Zijlstra 	do {							\
1228eb23b9fSPeter Zijlstra 		current->task_state_change = _THIS_IP_;		\
123b92b8b35SPeter Zijlstra 		smp_store_mb(current->state, (state_value));	\
1248eb23b9fSPeter Zijlstra 	} while (0)
1258eb23b9fSPeter Zijlstra 
1268eb23b9fSPeter Zijlstra #else
127498d0c57SAndrew Morton /*
128498d0c57SAndrew Morton  * set_current_state() includes a barrier so that the write of current->state
129498d0c57SAndrew Morton  * is correctly serialised wrt the caller's subsequent test of whether to
130498d0c57SAndrew Morton  * actually sleep:
131498d0c57SAndrew Morton  *
132a2250238SPeter Zijlstra  *   for (;;) {
133498d0c57SAndrew Morton  *	set_current_state(TASK_UNINTERRUPTIBLE);
134a2250238SPeter Zijlstra  *	if (!need_sleep)
135a2250238SPeter Zijlstra  *		break;
136498d0c57SAndrew Morton  *
137a2250238SPeter Zijlstra  *	schedule();
138a2250238SPeter Zijlstra  *   }
139a2250238SPeter Zijlstra  *   __set_current_state(TASK_RUNNING);
140a2250238SPeter Zijlstra  *
141a2250238SPeter Zijlstra  * If the caller does not need such serialisation (because, for instance, the
142a2250238SPeter Zijlstra  * condition test and condition change and wakeup are under the same lock) then
143a2250238SPeter Zijlstra  * use __set_current_state().
144a2250238SPeter Zijlstra  *
145a2250238SPeter Zijlstra  * The above is typically ordered against the wakeup, which does:
146a2250238SPeter Zijlstra  *
147a2250238SPeter Zijlstra  *	need_sleep = false;
148a2250238SPeter Zijlstra  *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
149a2250238SPeter Zijlstra  *
150a2250238SPeter Zijlstra  * Where wake_up_state() (and all other wakeup primitives) imply enough
151a2250238SPeter Zijlstra  * barriers to order the store of the variable against wakeup.
152a2250238SPeter Zijlstra  *
153a2250238SPeter Zijlstra  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
154a2250238SPeter Zijlstra  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
155a2250238SPeter Zijlstra  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
156a2250238SPeter Zijlstra  *
157a2250238SPeter Zijlstra  * This is obviously fine, since they both store the exact same value.
158a2250238SPeter Zijlstra  *
159a2250238SPeter Zijlstra  * Also see the comments of try_to_wake_up().
160498d0c57SAndrew Morton  */
1611da177e4SLinus Torvalds #define __set_current_state(state_value)		\
1621da177e4SLinus Torvalds 	do { current->state = (state_value); } while (0)
1631da177e4SLinus Torvalds #define set_current_state(state_value)			\
164b92b8b35SPeter Zijlstra 	smp_store_mb(current->state, (state_value))
1651da177e4SLinus Torvalds 
1668eb23b9fSPeter Zijlstra #endif
1678eb23b9fSPeter Zijlstra 
1681da177e4SLinus Torvalds /* Task command name length */
1691da177e4SLinus Torvalds #define TASK_COMM_LEN 16
1701da177e4SLinus Torvalds 
1713fa0818bSRik van Riel extern cpumask_var_t cpu_isolated_map;
1723fa0818bSRik van Riel 
1731da177e4SLinus Torvalds extern void scheduler_tick(void);
1741da177e4SLinus Torvalds 
1751da177e4SLinus Torvalds #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
176b3c97528SHarvey Harrison extern signed long schedule_timeout(signed long timeout);
17764ed93a2SNishanth Aravamudan extern signed long schedule_timeout_interruptible(signed long timeout);
178294d5cc2SMatthew Wilcox extern signed long schedule_timeout_killable(signed long timeout);
17964ed93a2SNishanth Aravamudan extern signed long schedule_timeout_uninterruptible(signed long timeout);
18069b27bafSAndrew Morton extern signed long schedule_timeout_idle(signed long timeout);
1811da177e4SLinus Torvalds asmlinkage void schedule(void);
182c5491ea7SThomas Gleixner extern void schedule_preempt_disabled(void);
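/*
 * Usage sketch: schedule_timeout() expects the caller to set the task
 * state first, and returns the number of jiffies left on an early wakeup:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (remaining)
 *		...woken early by a signal or an explicit wake_up...
 *
 * The _interruptible/_killable/_uninterruptible/_idle variants above
 * fold the set_current_state() call into the sleep itself.
 */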
1831da177e4SLinus Torvalds 
18410ab5643STejun Heo extern int __must_check io_schedule_prepare(void);
18510ab5643STejun Heo extern void io_schedule_finish(int token);
1869cff8adeSNeilBrown extern long io_schedule_timeout(long timeout);
18710ab5643STejun Heo extern void io_schedule(void);
1889cff8adeSNeilBrown 
189f06febc9SFrank Mayhar /**
1909d7fb042SPeter Zijlstra  * struct prev_cputime - snapshot of system and user cputime
191d37f761dSFrederic Weisbecker  * @utime: time spent in user mode
192d37f761dSFrederic Weisbecker  * @stime: time spent in system mode
1939d7fb042SPeter Zijlstra  * @lock: protects the above two fields
194d37f761dSFrederic Weisbecker  *
1959d7fb042SPeter Zijlstra  * Stores previous user/system time values such that we can guarantee
1969d7fb042SPeter Zijlstra  * monotonicity.
197d37f761dSFrederic Weisbecker  */
1989d7fb042SPeter Zijlstra struct prev_cputime {
1999d7fb042SPeter Zijlstra #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
2005613fda9SFrederic Weisbecker 	u64 utime;
2015613fda9SFrederic Weisbecker 	u64 stime;
2029d7fb042SPeter Zijlstra 	raw_spinlock_t lock;
2039d7fb042SPeter Zijlstra #endif
204d37f761dSFrederic Weisbecker };
205d37f761dSFrederic Weisbecker 
206d37f761dSFrederic Weisbecker /**
207f06febc9SFrank Mayhar  * struct task_cputime - collected CPU time counts
2085613fda9SFrederic Weisbecker  * @utime:		time spent in user mode, in nanoseconds
2095613fda9SFrederic Weisbecker  * @stime:		time spent in kernel mode, in nanoseconds
210f06febc9SFrank Mayhar  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
211f06febc9SFrank Mayhar  *
2129d7fb042SPeter Zijlstra  * This structure groups together three kinds of CPU time that are tracked for
2139d7fb042SPeter Zijlstra  * threads and thread groups.  Most things considering CPU time want to group
2149d7fb042SPeter Zijlstra  * these counts together and treat all three of them in parallel.
215f06febc9SFrank Mayhar  */
216f06febc9SFrank Mayhar struct task_cputime {
2175613fda9SFrederic Weisbecker 	u64 utime;
2185613fda9SFrederic Weisbecker 	u64 stime;
219f06febc9SFrank Mayhar 	unsigned long long sum_exec_runtime;
220f06febc9SFrank Mayhar };
2219d7fb042SPeter Zijlstra 
222f06febc9SFrank Mayhar /* Alternate field names when used to cache expirations. */
223f06febc9SFrank Mayhar #define virt_exp	utime
2249d7fb042SPeter Zijlstra #define prof_exp	stime
225f06febc9SFrank Mayhar #define sched_exp	sum_exec_runtime
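/*
 * E.g. the posix-cpu-timers code caches the earliest profiling timer
 * expiration in cputime_expires.prof_exp, which the alias above maps
 * onto the stime field of the same struct task_cputime.
 */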
226f06febc9SFrank Mayhar 
2271da177e4SLinus Torvalds struct sched_info {
228*7f5f8e8dSIngo Molnar #ifdef CONFIG_SCHED_INFO
2291da177e4SLinus Torvalds 	/* cumulative counters */
2302d72376bSIngo Molnar 	unsigned long pcount;	      /* # of times run on this cpu */
2319c2c4802SKen Chen 	unsigned long long run_delay; /* time spent waiting on a runqueue */
2321da177e4SLinus Torvalds 
2331da177e4SLinus Torvalds 	/* timestamps */
234172ba844SBalbir Singh 	unsigned long long last_arrival, /* when we last ran on a cpu */
2351da177e4SLinus Torvalds 			   last_queued;	/* when we were last queued to run */
236f6db8347SNaveen N. Rao #endif /* CONFIG_SCHED_INFO */
237*7f5f8e8dSIngo Molnar };
2381da177e4SLinus Torvalds 
2391da177e4SLinus Torvalds /*
2406ecdd749SYuyang Du  * Integer metrics need fixed point arithmetic, e.g., sched/fair
2416ecdd749SYuyang Du  * has a few: load, load_avg, util_avg, freq, and capacity.
2426ecdd749SYuyang Du  *
2436ecdd749SYuyang Du  * We define a basic fixed point arithmetic range, and then formalize
2446ecdd749SYuyang Du  * all these metrics based on that basic range.
2456ecdd749SYuyang Du  */
2466ecdd749SYuyang Du # define SCHED_FIXEDPOINT_SHIFT	10
2476ecdd749SYuyang Du # define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
2486ecdd749SYuyang Du 
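/*
 * Worked example: with SCHED_FIXEDPOINT_SHIFT = 10, the value 1.0 is
 * stored as 1 << 10 = 1024, so 50% becomes 512 and 25% becomes 256.
 * Multiplying two fixed point values needs one corrective shift:
 *
 *	prod = (a * b) >> SCHED_FIXEDPOINT_SHIFT;
 */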
24920b8a59fSIngo Molnar struct load_weight {
2509dbdb155SPeter Zijlstra 	unsigned long weight;
2519dbdb155SPeter Zijlstra 	u32 inv_weight;
25220b8a59fSIngo Molnar };
25320b8a59fSIngo Molnar 
2549d89c257SYuyang Du /*
2557b595334SYuyang Du  * The load_avg/util_avg accumulates an infinite geometric series
2567b595334SYuyang Du  * (see __update_load_avg() in kernel/sched/fair.c).
2577b595334SYuyang Du  *
2587b595334SYuyang Du  * [load_avg definition]
2597b595334SYuyang Du  *
2607b595334SYuyang Du  *   load_avg = runnable% * scale_load_down(load)
2617b595334SYuyang Du  *
2627b595334SYuyang Du  * where runnable% is the time ratio that a sched_entity is runnable.
2637b595334SYuyang Du  * For cfs_rq, it is the aggregated load_avg of all runnable and
2649d89c257SYuyang Du  * blocked sched_entities.
2657b595334SYuyang Du  *
2667b595334SYuyang Du  * load_avg may also take frequency scaling into account:
2677b595334SYuyang Du  *
2687b595334SYuyang Du  *   load_avg = runnable% * scale_load_down(load) * freq%
2697b595334SYuyang Du  *
2707b595334SYuyang Du  * where freq% is the CPU frequency normalized to the highest frequency.
2717b595334SYuyang Du  *
2727b595334SYuyang Du  * [util_avg definition]
2737b595334SYuyang Du  *
2747b595334SYuyang Du  *   util_avg = running% * SCHED_CAPACITY_SCALE
2757b595334SYuyang Du  *
2767b595334SYuyang Du  * where running% is the time ratio that a sched_entity is running on
2777b595334SYuyang Du  * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
2787b595334SYuyang Du  * and blocked sched_entities.
2797b595334SYuyang Du  *
2807b595334SYuyang Du  * util_avg may also factor frequency scaling and CPU capacity scaling:
2817b595334SYuyang Du  *
2827b595334SYuyang Du  *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
2837b595334SYuyang Du  *
2847b595334SYuyang Du  * where freq% is the same as above, and capacity% is the CPU capacity
2857b595334SYuyang Du  * normalized to the greatest capacity (due to uarch differences, etc).
2867b595334SYuyang Du  *
2877b595334SYuyang Du  * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
2887b595334SYuyang Du  * themselves are in the range of [0, 1]. To do fixed point arithmetic,
2897b595334SYuyang Du  * we therefore scale them to as large a range as necessary. This is for
2907b595334SYuyang Du  * example reflected by util_avg's SCHED_CAPACITY_SCALE.
2917b595334SYuyang Du  *
2927b595334SYuyang Du  * [Overflow issue]
2937b595334SYuyang Du  *
2947b595334SYuyang Du  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
2957b595334SYuyang Du  * with the highest load (=88761), always runnable on a single cfs_rq,
2967b595334SYuyang Du  * and should not overflow as the number already hits PID_MAX_LIMIT.
2977b595334SYuyang Du  *
2987b595334SYuyang Du  * For all other cases (including 32-bit kernels), struct load_weight's
2997b595334SYuyang Du  * weight will overflow before load_avg does, because:
3007b595334SYuyang Du  *
3017b595334SYuyang Du  *    Max(load_avg) <= Max(load.weight)
3027b595334SYuyang Du  *
3037b595334SYuyang Du  * Then it is the load_weight's responsibility to consider overflow
3047b595334SYuyang Du  * issues.
3059d89c257SYuyang Du  */
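/*
 * Worked example (a sketch, assuming scale_load_down() is an identity
 * here): an entity that is runnable 75% of the time with load.weight
 * 1024 has load_avg ~= 0.75 * 1024 = 768; if it actually runs 50% of
 * the time on a full-capacity CPU, util_avg ~= 0.5 * 1024 = 512
 * (SCHED_CAPACITY_SCALE being 1024).
 */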
3069d85f21cSPaul Turner struct sched_avg {
3079d89c257SYuyang Du 	u64 last_update_time, load_sum;
3089d89c257SYuyang Du 	u32 util_sum, period_contrib;
3099d89c257SYuyang Du 	unsigned long load_avg, util_avg;
3109d85f21cSPaul Turner };
3119d85f21cSPaul Turner 
31241acab88SLucas De Marchi struct sched_statistics {
313*7f5f8e8dSIngo Molnar #ifdef CONFIG_SCHEDSTATS
31494c18227SIngo Molnar 	u64			wait_start;
31594c18227SIngo Molnar 	u64			wait_max;
3166d082592SArjan van de Ven 	u64			wait_count;
3176d082592SArjan van de Ven 	u64			wait_sum;
3188f0dfc34SArjan van de Ven 	u64			iowait_count;
3198f0dfc34SArjan van de Ven 	u64			iowait_sum;
32094c18227SIngo Molnar 
32194c18227SIngo Molnar 	u64			sleep_start;
32220b8a59fSIngo Molnar 	u64			sleep_max;
32394c18227SIngo Molnar 	s64			sum_sleep_runtime;
32494c18227SIngo Molnar 
32594c18227SIngo Molnar 	u64			block_start;
32620b8a59fSIngo Molnar 	u64			block_max;
32720b8a59fSIngo Molnar 	u64			exec_max;
328eba1ed4bSIngo Molnar 	u64			slice_max;
329cc367732SIngo Molnar 
330cc367732SIngo Molnar 	u64			nr_migrations_cold;
331cc367732SIngo Molnar 	u64			nr_failed_migrations_affine;
332cc367732SIngo Molnar 	u64			nr_failed_migrations_running;
333cc367732SIngo Molnar 	u64			nr_failed_migrations_hot;
334cc367732SIngo Molnar 	u64			nr_forced_migrations;
335cc367732SIngo Molnar 
336cc367732SIngo Molnar 	u64			nr_wakeups;
337cc367732SIngo Molnar 	u64			nr_wakeups_sync;
338cc367732SIngo Molnar 	u64			nr_wakeups_migrate;
339cc367732SIngo Molnar 	u64			nr_wakeups_local;
340cc367732SIngo Molnar 	u64			nr_wakeups_remote;
341cc367732SIngo Molnar 	u64			nr_wakeups_affine;
342cc367732SIngo Molnar 	u64			nr_wakeups_affine_attempts;
343cc367732SIngo Molnar 	u64			nr_wakeups_passive;
344cc367732SIngo Molnar 	u64			nr_wakeups_idle;
34541acab88SLucas De Marchi #endif
346*7f5f8e8dSIngo Molnar };
34741acab88SLucas De Marchi 
34841acab88SLucas De Marchi struct sched_entity {
34941acab88SLucas De Marchi 	struct load_weight	load;		/* for load-balancing */
35041acab88SLucas De Marchi 	struct rb_node		run_node;
35141acab88SLucas De Marchi 	struct list_head	group_node;
35241acab88SLucas De Marchi 	unsigned int		on_rq;
35341acab88SLucas De Marchi 
35441acab88SLucas De Marchi 	u64			exec_start;
35541acab88SLucas De Marchi 	u64			sum_exec_runtime;
35641acab88SLucas De Marchi 	u64			vruntime;
35741acab88SLucas De Marchi 	u64			prev_sum_exec_runtime;
35841acab88SLucas De Marchi 
35941acab88SLucas De Marchi 	u64			nr_migrations;
36041acab88SLucas De Marchi 
36141acab88SLucas De Marchi 	struct sched_statistics statistics;
36294c18227SIngo Molnar 
36320b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
364fed14d45SPeter Zijlstra 	int			depth;
36520b8a59fSIngo Molnar 	struct sched_entity	*parent;
36620b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
36720b8a59fSIngo Molnar 	struct cfs_rq		*cfs_rq;
36820b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
36920b8a59fSIngo Molnar 	struct cfs_rq		*my_q;
37020b8a59fSIngo Molnar #endif
3718bd75c77SClark Williams 
372141965c7SAlex Shi #ifdef CONFIG_SMP
3735a107804SJiri Olsa 	/*
3745a107804SJiri Olsa 	 * Per entity load average tracking.
3755a107804SJiri Olsa 	 *
3765a107804SJiri Olsa 	 * Put into separate cache line so it does not
3775a107804SJiri Olsa 	 * collide with read-mostly values above.
3785a107804SJiri Olsa 	 */
3795a107804SJiri Olsa 	struct sched_avg	avg ____cacheline_aligned_in_smp;
3809d85f21cSPaul Turner #endif
38120b8a59fSIngo Molnar };
38270b97a7fSIngo Molnar 
383fa717060SPeter Zijlstra struct sched_rt_entity {
384fa717060SPeter Zijlstra 	struct list_head run_list;
38578f2c7dbSPeter Zijlstra 	unsigned long timeout;
38657d2aa00SYing Xue 	unsigned long watchdog_stamp;
387bee367edSRichard Kennedy 	unsigned int time_slice;
388ff77e468SPeter Zijlstra 	unsigned short on_rq;
389ff77e468SPeter Zijlstra 	unsigned short on_list;
3906f505b16SPeter Zijlstra 
39158d6c2d7SPeter Zijlstra 	struct sched_rt_entity *back;
392052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
3936f505b16SPeter Zijlstra 	struct sched_rt_entity	*parent;
3946f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
3956f505b16SPeter Zijlstra 	struct rt_rq		*rt_rq;
3966f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
3976f505b16SPeter Zijlstra 	struct rt_rq		*my_q;
3986f505b16SPeter Zijlstra #endif
399fa717060SPeter Zijlstra };
400fa717060SPeter Zijlstra 
401aab03e05SDario Faggioli struct sched_dl_entity {
402aab03e05SDario Faggioli 	struct rb_node	rb_node;
403aab03e05SDario Faggioli 
404aab03e05SDario Faggioli 	/*
405aab03e05SDario Faggioli 	 * Original scheduling parameters. Copied here from sched_attr
4064027d080Sxiaofeng.yan 	 * during sched_setattr(); they will remain the same until
4074027d080Sxiaofeng.yan 	 * the next sched_setattr().
408aab03e05SDario Faggioli 	 */
409aab03e05SDario Faggioli 	u64 dl_runtime;		/* maximum runtime for each instance	*/
410aab03e05SDario Faggioli 	u64 dl_deadline;	/* relative deadline of each instance	*/
411755378a4SHarald Gustafsson 	u64 dl_period;		/* separation of two instances (period) */
412332ac17eSDario Faggioli 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
413aab03e05SDario Faggioli 
414aab03e05SDario Faggioli 	/*
415aab03e05SDario Faggioli 	 * Actual scheduling parameters. Initialized with the values above,
416aab03e05SDario Faggioli 	 * they are continuously updated during task execution. Note that
417aab03e05SDario Faggioli 	 * the remaining runtime could be < 0 in case we are in overrun.
418aab03e05SDario Faggioli 	 */
419aab03e05SDario Faggioli 	s64 runtime;		/* remaining runtime for this instance	*/
420aab03e05SDario Faggioli 	u64 deadline;		/* absolute deadline for this instance	*/
421aab03e05SDario Faggioli 	unsigned int flags;	/* specifying the scheduler behaviour	*/
422aab03e05SDario Faggioli 
423aab03e05SDario Faggioli 	/*
424aab03e05SDario Faggioli 	 * Some bool flags:
425aab03e05SDario Faggioli 	 *
426aab03e05SDario Faggioli 	 * @dl_throttled tells if we exhausted the runtime. If so, the
427aab03e05SDario Faggioli 	 * task has to wait for a replenishment to be performed at the
428aab03e05SDario Faggioli 	 * next firing of dl_timer.
429aab03e05SDario Faggioli 	 *
4302d3d891dSDario Faggioli 	 * @dl_boosted tells if we are boosted due to DI (deadline inheritance). If so we are
4312d3d891dSDario Faggioli 	 * outside bandwidth enforcement mechanism (but only until we
4325bfd126eSJuri Lelli 	 * exit the critical section);
4335bfd126eSJuri Lelli 	 *
4345bfd126eSJuri Lelli 	 * @dl_yielded tells if the task gave up the cpu before consuming
4355bfd126eSJuri Lelli 	 * all its available runtime during the last job.
436aab03e05SDario Faggioli 	 */
43772f9f3fdSLuca Abeni 	int dl_throttled, dl_boosted, dl_yielded;
438aab03e05SDario Faggioli 
439aab03e05SDario Faggioli 	/*
440aab03e05SDario Faggioli 	 * Bandwidth enforcement timer. Each -deadline task has its
441aab03e05SDario Faggioli 	 * own bandwidth to be enforced, thus we need one timer per task.
442aab03e05SDario Faggioli 	 */
443aab03e05SDario Faggioli 	struct hrtimer dl_timer;
444aab03e05SDario Faggioli };
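/*
 * Example (a sketch): a task asking for "10ms of runtime every 100ms,
 * due within 30ms" would carry dl_runtime = 10 * NSEC_PER_MSEC,
 * dl_deadline = 30 * NSEC_PER_MSEC and dl_period = 100 * NSEC_PER_MSEC,
 * subject to the usual constraint runtime <= deadline <= period.
 */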
4458bd75c77SClark Williams 
4461d082fd0SPaul E. McKenney union rcu_special {
4471d082fd0SPaul E. McKenney 	struct {
4488203d6d0SPaul E. McKenney 		u8 blocked;
4498203d6d0SPaul E. McKenney 		u8 need_qs;
4508203d6d0SPaul E. McKenney 		u8 exp_need_qs;
4518203d6d0SPaul E. McKenney 		u8 pad;	/* Otherwise the compiler can store garbage here. */
4528203d6d0SPaul E. McKenney 	} b; /* Bits. */
4538203d6d0SPaul E. McKenney 	u32 s; /* Set of bits. */
4541d082fd0SPaul E. McKenney };
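/*
 * The union lets the RCU core clear all flags with one 32-bit store
 * (t->rcu_read_unlock_special.s = 0) while fast paths test a single
 * byte, e.g. t->rcu_read_unlock_special.b.blocked.
 */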
45586848966SPaul E. McKenney 
4568dc85d54SPeter Zijlstra enum perf_event_task_context {
4578dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
4588dc85d54SPeter Zijlstra 	perf_hw_context = 0,
45989a1e187SPeter Zijlstra 	perf_sw_context,
4608dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
4618dc85d54SPeter Zijlstra };
4628dc85d54SPeter Zijlstra 
463eb61baf6SIngo Molnar struct wake_q_node {
464eb61baf6SIngo Molnar 	struct wake_q_node *next;
465eb61baf6SIngo Molnar };
466eb61baf6SIngo Molnar 
4671da177e4SLinus Torvalds struct task_struct {
468c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
469c65eacbeSAndy Lutomirski 	/*
470c65eacbeSAndy Lutomirski 	 * For reasons of header soup (see current_thread_info()), this
471c65eacbeSAndy Lutomirski 	 * must be the first element of task_struct.
472c65eacbeSAndy Lutomirski 	 */
473c65eacbeSAndy Lutomirski 	struct thread_info thread_info;
474c65eacbeSAndy Lutomirski #endif
4751da177e4SLinus Torvalds 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
476f7e4217bSRoman Zippel 	void *stack;
4771da177e4SLinus Torvalds 	atomic_t usage;
47897dc32cdSWilliam Cohen 	unsigned int flags;	/* per process flags, defined below */
47997dc32cdSWilliam Cohen 	unsigned int ptrace;
4801da177e4SLinus Torvalds 
4812dd73a4fSPeter Williams #ifdef CONFIG_SMP
482fa14ff4aSPeter Zijlstra 	struct llist_node wake_entry;
4833ca7a440SPeter Zijlstra 	int on_cpu;
484c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
485c65eacbeSAndy Lutomirski 	unsigned int cpu;	/* current CPU */
486c65eacbeSAndy Lutomirski #endif
48763b0e9edSMike Galbraith 	unsigned int wakee_flips;
48862470419SMichael Wang 	unsigned long wakee_flip_decay_ts;
48963b0e9edSMike Galbraith 	struct task_struct *last_wakee;
490ac66f547SPeter Zijlstra 
491ac66f547SPeter Zijlstra 	int wake_cpu;
4924866cde0SNick Piggin #endif
493fd2f4419SPeter Zijlstra 	int on_rq;
49450e645a8SIngo Molnar 
495b29739f9SIngo Molnar 	int prio, static_prio, normal_prio;
496c7aceabaSRichard Kennedy 	unsigned int rt_priority;
4975522d5d5SIngo Molnar 	const struct sched_class *sched_class;
49820b8a59fSIngo Molnar 	struct sched_entity se;
499fa717060SPeter Zijlstra 	struct sched_rt_entity rt;
5008323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
5018323f26cSPeter Zijlstra 	struct task_group *sched_task_group;
5028323f26cSPeter Zijlstra #endif
503aab03e05SDario Faggioli 	struct sched_dl_entity dl;
5041da177e4SLinus Torvalds 
505e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
506e107be36SAvi Kivity 	/* list of struct preempt_notifier: */
507e107be36SAvi Kivity 	struct hlist_head preempt_notifiers;
508e107be36SAvi Kivity #endif
509e107be36SAvi Kivity 
5106c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
5112056a782SJens Axboe 	unsigned int btrace_seq;
5126c5c9341SAlexey Dobriyan #endif
5131da177e4SLinus Torvalds 
51497dc32cdSWilliam Cohen 	unsigned int policy;
51529baa747SPeter Zijlstra 	int nr_cpus_allowed;
5161da177e4SLinus Torvalds 	cpumask_t cpus_allowed;
5171da177e4SLinus Torvalds 
518a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
519e260be67SPaul E. McKenney 	int rcu_read_lock_nesting;
5201d082fd0SPaul E. McKenney 	union rcu_special rcu_read_unlock_special;
521f41d911fSPaul E. McKenney 	struct list_head rcu_node_entry;
522a57eb940SPaul E. McKenney 	struct rcu_node *rcu_blocked_node;
52328f6569aSPranith Kumar #endif /* #ifdef CONFIG_PREEMPT_RCU */
5248315f422SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
5258315f422SPaul E. McKenney 	unsigned long rcu_tasks_nvcsw;
5268315f422SPaul E. McKenney 	bool rcu_tasks_holdout;
5278315f422SPaul E. McKenney 	struct list_head rcu_tasks_holdout_list;
528176f8f7aSPaul E. McKenney 	int rcu_tasks_idle_cpu;
5298315f422SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RCU */
530e260be67SPaul E. McKenney 
5311da177e4SLinus Torvalds 	struct sched_info sched_info;
5321da177e4SLinus Torvalds 
5331da177e4SLinus Torvalds 	struct list_head tasks;
534806c09a7SDario Faggioli #ifdef CONFIG_SMP
535917b627dSGregory Haskins 	struct plist_node pushable_tasks;
5361baca4ceSJuri Lelli 	struct rb_node pushable_dl_tasks;
537806c09a7SDario Faggioli #endif
5381da177e4SLinus Torvalds 
5391da177e4SLinus Torvalds 	struct mm_struct *mm, *active_mm;
540314ff785SIngo Molnar 
541314ff785SIngo Molnar 	/* Per-thread vma caching: */
542314ff785SIngo Molnar 	struct vmacache vmacache;
543314ff785SIngo Molnar 
54434e55232SKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
54534e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat	rss_stat;
54634e55232SKAMEZAWA Hiroyuki #endif
5471da177e4SLinus Torvalds /* task state */
54897dc32cdSWilliam Cohen 	int exit_state;
5491da177e4SLinus Torvalds 	int exit_code, exit_signal;
5501da177e4SLinus Torvalds 	int pdeath_signal;  /*  The signal sent when the parent dies  */
551e7cc4173SPalmer Dabbelt 	unsigned long jobctl;	/* JOBCTL_*, siglock protected */
5529b89f6baSAndrei Epure 
5539b89f6baSAndrei Epure 	/* Used for emulating ABI behavior of previous Linux versions */
55497dc32cdSWilliam Cohen 	unsigned int personality;
5559b89f6baSAndrei Epure 
556be958bdcSPeter Zijlstra 	/* scheduler bits, serialized by scheduler locks */
557ca94c442SLennart Poettering 	unsigned sched_reset_on_fork:1;
558a8e4f2eaSPeter Zijlstra 	unsigned sched_contributes_to_load:1;
559ff303e66SPeter Zijlstra 	unsigned sched_migrated:1;
560b7e7ade3SPeter Zijlstra 	unsigned sched_remote_wakeup:1;
561be958bdcSPeter Zijlstra 	unsigned :0; /* force alignment to the next boundary */
562be958bdcSPeter Zijlstra 
563be958bdcSPeter Zijlstra 	/* unserialized, strictly 'current' */
564be958bdcSPeter Zijlstra 	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
565be958bdcSPeter Zijlstra 	unsigned in_iowait:1;
5667e781418SAndy Lutomirski #if !defined(TIF_RESTORE_SIGMASK)
5677e781418SAndy Lutomirski 	unsigned restore_sigmask:1;
5687e781418SAndy Lutomirski #endif
569626ebc41STejun Heo #ifdef CONFIG_MEMCG
570626ebc41STejun Heo 	unsigned memcg_may_oom:1;
571127424c8SJohannes Weiner #ifndef CONFIG_SLOB
5726f185c29SVladimir Davydov 	unsigned memcg_kmem_skip_account:1;
5736f185c29SVladimir Davydov #endif
574127424c8SJohannes Weiner #endif
575ff303e66SPeter Zijlstra #ifdef CONFIG_COMPAT_BRK
576ff303e66SPeter Zijlstra 	unsigned brk_randomized:1;
577ff303e66SPeter Zijlstra #endif
5786f185c29SVladimir Davydov 
5791d4457f9SKees Cook 	unsigned long atomic_flags; /* Flags needing atomic access. */
5801d4457f9SKees Cook 
581f56141e3SAndy Lutomirski 	struct restart_block restart_block;
582f56141e3SAndy Lutomirski 
5831da177e4SLinus Torvalds 	pid_t pid;
5841da177e4SLinus Torvalds 	pid_t tgid;
5850a425405SArjan van de Ven 
5861314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
5870a425405SArjan van de Ven 	/* Canary value for the -fstack-protector gcc feature */
5880a425405SArjan van de Ven 	unsigned long stack_canary;
5891314562aSHiroshi Shimamoto #endif
5901da177e4SLinus Torvalds 	/*
5911da177e4SLinus Torvalds 	 * pointers to (original) parent process, youngest child, younger sibling,
5921da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
593f470021aSRoland McGrath 	 * p->real_parent->pid)
5941da177e4SLinus Torvalds 	 */
595abd63bc3SKees Cook 	struct task_struct __rcu *real_parent; /* real parent process */
596abd63bc3SKees Cook 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
5971da177e4SLinus Torvalds 	/*
598f470021aSRoland McGrath 	 * children/sibling forms the list of my natural children
5991da177e4SLinus Torvalds 	 */
6001da177e4SLinus Torvalds 	struct list_head children;	/* list of my children */
6011da177e4SLinus Torvalds 	struct list_head sibling;	/* linkage in my parent's children list */
6021da177e4SLinus Torvalds 	struct task_struct *group_leader;	/* threadgroup leader */
6031da177e4SLinus Torvalds 
604f470021aSRoland McGrath 	/*
605f470021aSRoland McGrath 	 * ptraced is the list of tasks this task is using ptrace on.
606f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
607f470021aSRoland McGrath 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
608f470021aSRoland McGrath 	 */
609f470021aSRoland McGrath 	struct list_head ptraced;
610f470021aSRoland McGrath 	struct list_head ptrace_entry;
611f470021aSRoland McGrath 
6121da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
61392476d7fSEric W. Biederman 	struct pid_link pids[PIDTYPE_MAX];
61447e65328SOleg Nesterov 	struct list_head thread_group;
6150c740d0aSOleg Nesterov 	struct list_head thread_node;
6161da177e4SLinus Torvalds 
6171da177e4SLinus Torvalds 	struct completion *vfork_done;		/* for vfork() */
6181da177e4SLinus Torvalds 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
6191da177e4SLinus Torvalds 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
6201da177e4SLinus Torvalds 
6215613fda9SFrederic Weisbecker 	u64 utime, stime;
62240565b5aSStanislaw Gruszka #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
6235613fda9SFrederic Weisbecker 	u64 utimescaled, stimescaled;
62440565b5aSStanislaw Gruszka #endif
62516a6d9beSFrederic Weisbecker 	u64 gtime;
6269d7fb042SPeter Zijlstra 	struct prev_cputime prev_cputime;
6276a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
628b7ce2277SFrederic Weisbecker 	seqcount_t vtime_seqcount;
6296a61671bSFrederic Weisbecker 	unsigned long long vtime_snap;
6306a61671bSFrederic Weisbecker 	enum {
6317098c1eaSFrederic Weisbecker 		/* Task is sleeping or running on a CPU with VTIME inactive */
6327098c1eaSFrederic Weisbecker 		VTIME_INACTIVE = 0,
6337098c1eaSFrederic Weisbecker 		/* Task runs in userspace on a CPU with VTIME active */
6346a61671bSFrederic Weisbecker 		VTIME_USER,
6357098c1eaSFrederic Weisbecker 		/* Task runs in kernelspace on a CPU with VTIME active */
6366a61671bSFrederic Weisbecker 		VTIME_SYS,
6376a61671bSFrederic Weisbecker 	} vtime_snap_whence;
6386a61671bSFrederic Weisbecker #endif
639d027d45dSFrederic Weisbecker 
640d027d45dSFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
641f009a7a7SFrederic Weisbecker 	atomic_t tick_dep_mask;
642d027d45dSFrederic Weisbecker #endif
6431da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw; /* context switch counts */
644ccbf62d8SThomas Gleixner 	u64 start_time;		/* monotonic time in nsec */
64557e0be04SThomas Gleixner 	u64 real_start_time;	/* boot based time in nsec */
6461da177e4SLinus Torvalds /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
6471da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt;
6481da177e4SLinus Torvalds 
649b18b6a9cSNicolas Pitre #ifdef CONFIG_POSIX_TIMERS
650f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
6511da177e4SLinus Torvalds 	struct list_head cpu_timers[3];
652b18b6a9cSNicolas Pitre #endif
6531da177e4SLinus Torvalds 
6541da177e4SLinus Torvalds /* process credentials */
65564b875f7SEric W. Biederman 	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
6561b0ba1c9SArnd Bergmann 	const struct cred __rcu *real_cred; /* objective and real subjective task
6573b11a1deSDavid Howells 					 * credentials (COW) */
6581b0ba1c9SArnd Bergmann 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
6593b11a1deSDavid Howells 					 * credentials (COW) */
66036772092SPaolo 'Blaisorblade' Giarrusso 	char comm[TASK_COMM_LEN]; /* executable name excluding path
66136772092SPaolo 'Blaisorblade' Giarrusso 				     - access with [gs]et_task_comm (which locks
66236772092SPaolo 'Blaisorblade' Giarrusso 				       it with task_lock())
663221af7f8SLinus Torvalds 				     - initialized normally by setup_new_exec */
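	/*
	 * E.g. a sketch of safe access from elsewhere:
	 *
	 *	char buf[TASK_COMM_LEN];
	 *	get_task_comm(buf, task);
	 */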
6641da177e4SLinus Torvalds /* file system info */
665756daf26SNeilBrown 	struct nameidata *nameidata;
6663d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
6671da177e4SLinus Torvalds /* ipc stuff */
6681da177e4SLinus Torvalds 	struct sysv_sem sysvsem;
669ab602f79SJack Miller 	struct sysv_shm sysvshm;
6703d5b6fccSAlexey Dobriyan #endif
671e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
67282a1fcb9SIngo Molnar /* hung task detection */
67382a1fcb9SIngo Molnar 	unsigned long last_switch_count;
67482a1fcb9SIngo Molnar #endif
6751da177e4SLinus Torvalds /* filesystem information */
6761da177e4SLinus Torvalds 	struct fs_struct *fs;
6771da177e4SLinus Torvalds /* open file information */
6781da177e4SLinus Torvalds 	struct files_struct *files;
6791651e14eSSerge E. Hallyn /* namespaces */
680ab516013SSerge E. Hallyn 	struct nsproxy *nsproxy;
6811da177e4SLinus Torvalds /* signal handlers */
6821da177e4SLinus Torvalds 	struct signal_struct *signal;
6831da177e4SLinus Torvalds 	struct sighand_struct *sighand;
6841da177e4SLinus Torvalds 
6851da177e4SLinus Torvalds 	sigset_t blocked, real_blocked;
686f3de272bSRoland McGrath 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
6871da177e4SLinus Torvalds 	struct sigpending pending;
6881da177e4SLinus Torvalds 
6891da177e4SLinus Torvalds 	unsigned long sas_ss_sp;
6901da177e4SLinus Torvalds 	size_t sas_ss_size;
6912a742138SStas Sergeev 	unsigned sas_ss_flags;
6922e01fabeSOleg Nesterov 
69367d12145SAl Viro 	struct callback_head *task_works;
694e73f8959SOleg Nesterov 
6951da177e4SLinus Torvalds 	struct audit_context *audit_context;
696bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
697e1760bd5SEric W. Biederman 	kuid_t loginuid;
6984746ec5bSEric Paris 	unsigned int sessionid;
699bfef93a5SAl Viro #endif
700932ecebbSWill Drewry 	struct seccomp seccomp;
7011da177e4SLinus Torvalds 
7021da177e4SLinus Torvalds /* Thread group tracking */
7031da177e4SLinus Torvalds 	u32 parent_exec_id;
7041da177e4SLinus Torvalds 	u32 self_exec_id;
70558568d2aSMiao Xie /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
70658568d2aSMiao Xie  * mempolicy */
7071da177e4SLinus Torvalds 	spinlock_t alloc_lock;
7081da177e4SLinus Torvalds 
709b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
7101d615482SThomas Gleixner 	raw_spinlock_t pi_lock;
711b29739f9SIngo Molnar 
71276751049SPeter Zijlstra 	struct wake_q_node wake_q;
71376751049SPeter Zijlstra 
71423f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
71523f78d4aSIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task */
716fb00aca4SPeter Zijlstra 	struct rb_root pi_waiters;
717fb00aca4SPeter Zijlstra 	struct rb_node *pi_waiters_leftmost;
71823f78d4aSIngo Molnar 	/* Deadlock detection and priority inheritance handling */
71923f78d4aSIngo Molnar 	struct rt_mutex_waiter *pi_blocked_on;
72023f78d4aSIngo Molnar #endif
72123f78d4aSIngo Molnar 
722408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
723408894eeSIngo Molnar 	/* mutex deadlock detection */
724408894eeSIngo Molnar 	struct mutex_waiter *blocked_on;
725408894eeSIngo Molnar #endif
726de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
727de30a2b3SIngo Molnar 	unsigned int irq_events;
728de30a2b3SIngo Molnar 	unsigned long hardirq_enable_ip;
729de30a2b3SIngo Molnar 	unsigned long hardirq_disable_ip;
730fa1452e8SHiroshi Shimamoto 	unsigned int hardirq_enable_event;
731de30a2b3SIngo Molnar 	unsigned int hardirq_disable_event;
732fa1452e8SHiroshi Shimamoto 	int hardirqs_enabled;
733de30a2b3SIngo Molnar 	int hardirq_context;
734fa1452e8SHiroshi Shimamoto 	unsigned long softirq_disable_ip;
735fa1452e8SHiroshi Shimamoto 	unsigned long softirq_enable_ip;
736fa1452e8SHiroshi Shimamoto 	unsigned int softirq_disable_event;
737fa1452e8SHiroshi Shimamoto 	unsigned int softirq_enable_event;
738fa1452e8SHiroshi Shimamoto 	int softirqs_enabled;
739de30a2b3SIngo Molnar 	int softirq_context;
740de30a2b3SIngo Molnar #endif
741fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
742bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
743fbb9ce95SIngo Molnar 	u64 curr_chain_key;
744fbb9ce95SIngo Molnar 	int lockdep_depth;
745fbb9ce95SIngo Molnar 	unsigned int lockdep_recursion;
746c7aceabaSRichard Kennedy 	struct held_lock held_locks[MAX_LOCK_DEPTH];
747cf40bd16SNick Piggin 	gfp_t lockdep_reclaim_gfp;
748fbb9ce95SIngo Molnar #endif
749c6d30853SAndrey Ryabinin #ifdef CONFIG_UBSAN
750c6d30853SAndrey Ryabinin 	unsigned int in_ubsan;
751c6d30853SAndrey Ryabinin #endif
752408894eeSIngo Molnar 
7531da177e4SLinus Torvalds /* journalling filesystem info */
7541da177e4SLinus Torvalds 	void *journal_info;
7551da177e4SLinus Torvalds 
756d89d8796SNeil Brown /* stacked block device info */
757bddd87c7SAkinobu Mita 	struct bio_list *bio_list;
758d89d8796SNeil Brown 
75973c10101SJens Axboe #ifdef CONFIG_BLOCK
76073c10101SJens Axboe /* stack plugging */
76173c10101SJens Axboe 	struct blk_plug *plug;
76273c10101SJens Axboe #endif
76373c10101SJens Axboe 
7641da177e4SLinus Torvalds /* VM state */
7651da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state;
7661da177e4SLinus Torvalds 
7671da177e4SLinus Torvalds 	struct backing_dev_info *backing_dev_info;
7681da177e4SLinus Torvalds 
7691da177e4SLinus Torvalds 	struct io_context *io_context;
7701da177e4SLinus Torvalds 
7711da177e4SLinus Torvalds 	unsigned long ptrace_message;
7721da177e4SLinus Torvalds 	siginfo_t *last_siginfo; /* For ptrace use.  */
7737c3ab738SAndrew Morton 	struct task_io_accounting ioac;
7748f0ab514SJay Lan #if defined(CONFIG_TASK_XACCT)
7751da177e4SLinus Torvalds 	u64 acct_rss_mem1;	/* accumulated rss usage */
7761da177e4SLinus Torvalds 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
777605dc2b3SFrederic Weisbecker 	u64 acct_timexpd;	/* stime + utime since last update */
7781da177e4SLinus Torvalds #endif
7791da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
78058568d2aSMiao Xie 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
781cc9a6c87SMel Gorman 	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
782825a46afSPaul Jackson 	int cpuset_mem_spread_rotor;
7836adef3ebSJack Steiner 	int cpuset_slab_spread_rotor;
7841da177e4SLinus Torvalds #endif
785ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
786817929ecSPaul Menage 	/* Control Group info protected by css_set_lock */
7872c392b8cSArnd Bergmann 	struct css_set __rcu *cgroups;
788817929ecSPaul Menage 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
789817929ecSPaul Menage 	struct list_head cg_list;
790ddbcc7e8SPaul Menage #endif
791e02737d5SFenghua Yu #ifdef CONFIG_INTEL_RDT_A
792e02737d5SFenghua Yu 	int closid;
793e02737d5SFenghua Yu #endif
79442b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
7950771dfefSIngo Molnar 	struct robust_list_head __user *robust_list;
79634f192c6SIngo Molnar #ifdef CONFIG_COMPAT
79734f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
79834f192c6SIngo Molnar #endif
799c87e2837SIngo Molnar 	struct list_head pi_state_list;
800c87e2837SIngo Molnar 	struct futex_pi_state *pi_state_cache;
80142b2dd0aSAlexey Dobriyan #endif
802cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
8038dc85d54SPeter Zijlstra 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
804cdd6c482SIngo Molnar 	struct mutex perf_event_mutex;
805cdd6c482SIngo Molnar 	struct list_head perf_event_list;
806a63eaf34SPaul Mackerras #endif
8078f47b187SThomas Gleixner #ifdef CONFIG_DEBUG_PREEMPT
8088f47b187SThomas Gleixner 	unsigned long preempt_disable_ip;
8098f47b187SThomas Gleixner #endif
810c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
81158568d2aSMiao Xie 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
812c7aceabaSRichard Kennedy 	short il_next;
813207205a2SEric Dumazet 	short pref_node_fork;
814c7aceabaSRichard Kennedy #endif
815cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
816cbee9f88SPeter Zijlstra 	int numa_scan_seq;
817cbee9f88SPeter Zijlstra 	unsigned int numa_scan_period;
818598f0ec0SMel Gorman 	unsigned int numa_scan_period_max;
819de1c9ce6SRik van Riel 	int numa_preferred_nid;
8206b9a7460SMel Gorman 	unsigned long numa_migrate_retry;
821cbee9f88SPeter Zijlstra 	u64 node_stamp;			/* migration stamp  */
8227e2703e6SRik van Riel 	u64 last_task_numa_placement;
8237e2703e6SRik van Riel 	u64 last_sum_exec_runtime;
824cbee9f88SPeter Zijlstra 	struct callback_head numa_work;
825f809ca9aSMel Gorman 
8268c8a743cSPeter Zijlstra 	struct list_head numa_entry;
8278c8a743cSPeter Zijlstra 	struct numa_group *numa_group;
8288c8a743cSPeter Zijlstra 
829745d6147SMel Gorman 	/*
83044dba3d5SIulia Manda 	 * numa_faults is an array split into four regions:
83144dba3d5SIulia Manda 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
83244dba3d5SIulia Manda 	 * in this precise order.
83344dba3d5SIulia Manda 	 *
83444dba3d5SIulia Manda 	 * faults_memory: Exponential decaying average of faults on a per-node
83544dba3d5SIulia Manda 	 * basis. Scheduling placement decisions are made based on these
83644dba3d5SIulia Manda 	 * counts. The values remain static for the duration of a PTE scan.
83744dba3d5SIulia Manda 	 * faults_cpu: Track the nodes the process was running on when a NUMA
83844dba3d5SIulia Manda 	 * hinting fault was incurred.
83944dba3d5SIulia Manda 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
84044dba3d5SIulia Manda 	 * during the current scan window. When the scan completes, the counts
84144dba3d5SIulia Manda 	 * in faults_memory and faults_cpu decay and these values are copied.
842745d6147SMel Gorman 	 */
84344dba3d5SIulia Manda 	unsigned long *numa_faults;
84483e1d2cdSMel Gorman 	unsigned long total_numa_faults;
845745d6147SMel Gorman 
846745d6147SMel Gorman 	/*
84704bb2f94SRik van Riel 	 * numa_faults_locality tracks if faults recorded during the last
848074c2381SMel Gorman 	 * scan window were remote/local or failed to migrate. The task scan
849074c2381SMel Gorman 	 * period is adapted based on the locality of the faults with different
850074c2381SMel Gorman 	 * weights depending on whether they were shared or private faults.
85104bb2f94SRik van Riel 	 */
852074c2381SMel Gorman 	unsigned long numa_faults_locality[3];
85304bb2f94SRik van Riel 
854b32e86b4SIngo Molnar 	unsigned long numa_pages_migrated;
855cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
856cbee9f88SPeter Zijlstra 
85772b252aeSMel Gorman 	struct tlbflush_unmap_batch tlb_ubc;
85872b252aeSMel Gorman 
859e56d0903SIngo Molnar 	struct rcu_head rcu;
860b92ce558SJens Axboe 
861b92ce558SJens Axboe 	/*
862b92ce558SJens Axboe 	 * cache last used pipe for splice
863b92ce558SJens Axboe 	 */
864b92ce558SJens Axboe 	struct pipe_inode_info *splice_pipe;
8655640f768SEric Dumazet 
8665640f768SEric Dumazet 	struct page_frag task_frag;
8675640f768SEric Dumazet 
868ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT
869ca74e92bSShailabh Nagar 	struct task_delay_info		*delays;
870ca74e92bSShailabh Nagar #endif
87147913d4eSIngo Molnar 
872f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
873f4f154fdSAkinobu Mita 	int make_it_fail;
874f4f154fdSAkinobu Mita #endif
8759d823e8fSWu Fengguang 	/*
8769d823e8fSWu Fengguang 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
8779d823e8fSWu Fengguang 	 * balance_dirty_pages() for some dirty throttling pause
8789d823e8fSWu Fengguang 	 */
8799d823e8fSWu Fengguang 	int nr_dirtied;
8809d823e8fSWu Fengguang 	int nr_dirtied_pause;
88183712358SWu Fengguang 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
8829d823e8fSWu Fengguang 
8839745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
8849745512cSArjan van de Ven 	int latency_record_count;
8859745512cSArjan van de Ven 	struct latency_record latency_record[LT_SAVECOUNT];
8869745512cSArjan van de Ven #endif
8876976675dSArjan van de Ven 	/*
8886976675dSArjan van de Ven 	 * time slack values; these are used to round up poll() and
8896976675dSArjan van de Ven 	 * select() etc timeout values. These are in nanoseconds.
8906976675dSArjan van de Ven 	 */
891da8b44d5SJohn Stultz 	u64 timer_slack_ns;
892da8b44d5SJohn Stultz 	u64 default_timer_slack_ns;
893f8d570a4SDavid Miller 
8940b24beccSAndrey Ryabinin #ifdef CONFIG_KASAN
8950b24beccSAndrey Ryabinin 	unsigned int kasan_depth;
8960b24beccSAndrey Ryabinin #endif
897fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8983ad2f3fbSDaniel Mack 	/* Index of current stored address in ret_stack */
899f201ae23SFrederic Weisbecker 	int curr_ret_stack;
900f201ae23SFrederic Weisbecker 	/* Stack of return addresses for return function tracing */
901f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack	*ret_stack;
9028aef2d28SSteven Rostedt 	/* time stamp for last schedule */
9038aef2d28SSteven Rostedt 	unsigned long long ftrace_timestamp;
904f201ae23SFrederic Weisbecker 	/*
905f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
906f201ae23SFrederic Weisbecker 	 * because of depth overrun.
907f201ae23SFrederic Weisbecker 	 */
908f201ae23SFrederic Weisbecker 	atomic_t trace_overrun;
909380c4b14SFrederic Weisbecker 	/* Pause for the tracing */
910380c4b14SFrederic Weisbecker 	atomic_t tracing_graph_pause;
911f201ae23SFrederic Weisbecker #endif
912ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
913ea4e2bc4SSteven Rostedt 	/* state flags for use by tracers */
914ea4e2bc4SSteven Rostedt 	unsigned long trace;
915b1cff0adSSteven Rostedt 	/* bitmask and counter of trace recursion */
916261842b7SSteven Rostedt 	unsigned long trace_recursion;
917261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
9185c9a8750SDmitry Vyukov #ifdef CONFIG_KCOV
9195c9a8750SDmitry Vyukov 	/* Coverage collection mode enabled for this task (0 if disabled). */
9205c9a8750SDmitry Vyukov 	enum kcov_mode kcov_mode;
9215c9a8750SDmitry Vyukov 	/* Size of the kcov_area. */
9225c9a8750SDmitry Vyukov 	unsigned	kcov_size;
9235c9a8750SDmitry Vyukov 	/* Buffer for coverage collection. */
9245c9a8750SDmitry Vyukov 	void		*kcov_area;
9255c9a8750SDmitry Vyukov 	/* kcov descriptor wired with this task or NULL. */
9265c9a8750SDmitry Vyukov 	struct kcov	*kcov;
9275c9a8750SDmitry Vyukov #endif
9286f185c29SVladimir Davydov #ifdef CONFIG_MEMCG
929626ebc41STejun Heo 	struct mem_cgroup *memcg_in_oom;
930626ebc41STejun Heo 	gfp_t memcg_oom_gfp_mask;
931626ebc41STejun Heo 	int memcg_oom_order;
932b23afb93STejun Heo 
933b23afb93STejun Heo 	/* number of pages to reclaim on returning to userland */
934b23afb93STejun Heo 	unsigned int memcg_nr_pages_over_high;
935569b846dSKAMEZAWA Hiroyuki #endif
9360326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
9370326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
9380326f5a9SSrikar Dronamraju #endif
939cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
940cafe5635SKent Overstreet 	unsigned int	sequential_io;
941cafe5635SKent Overstreet 	unsigned int	sequential_io_avg;
942cafe5635SKent Overstreet #endif
9438eb23b9fSPeter Zijlstra #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
9448eb23b9fSPeter Zijlstra 	unsigned long	task_state_change;
9458eb23b9fSPeter Zijlstra #endif
9468bcbde54SDavid Hildenbrand 	int pagefault_disabled;
94703049269SMichal Hocko #ifdef CONFIG_MMU
94829c696e1SVladimir Davydov 	struct task_struct *oom_reaper_list;
94903049269SMichal Hocko #endif
950ba14a194SAndy Lutomirski #ifdef CONFIG_VMAP_STACK
951ba14a194SAndy Lutomirski 	struct vm_struct *stack_vm_area;
952ba14a194SAndy Lutomirski #endif
95368f24b08SAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
95468f24b08SAndy Lutomirski 	/* A live task holds one reference. */
95568f24b08SAndy Lutomirski 	atomic_t stack_refcount;
95668f24b08SAndy Lutomirski #endif
9570c8c0f03SDave Hansen /* CPU-specific state of this task */
9580c8c0f03SDave Hansen 	struct thread_struct thread;
9590c8c0f03SDave Hansen /*
9600c8c0f03SDave Hansen  * WARNING: on x86, 'thread_struct' contains a variable-sized
9610c8c0f03SDave Hansen  * structure.  It *MUST* be at the end of 'task_struct'.
9620c8c0f03SDave Hansen  *
9630c8c0f03SDave Hansen  * Do not put anything below here!
9640c8c0f03SDave Hansen  */
9651da177e4SLinus Torvalds };
9661da177e4SLinus Torvalds 
967e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
96822c935f4SEric W. Biederman {
96922c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
97022c935f4SEric W. Biederman }
97122c935f4SEric W. Biederman 
972e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
97322c935f4SEric W. Biederman {
97422c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
97522c935f4SEric W. Biederman }
97622c935f4SEric W. Biederman 
9776dda81f4SOleg Nesterov /*
9786dda81f4SOleg Nesterov  * Without tasklist or rcu lock it is not safe to dereference
9796dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current;
9806dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
9816dda81f4SOleg Nesterov  */
982e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
98322c935f4SEric W. Biederman {
98422c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
98522c935f4SEric W. Biederman }
98622c935f4SEric W. Biederman 
987e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
98822c935f4SEric W. Biederman {
98922c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
99022c935f4SEric W. Biederman }
99122c935f4SEric W. Biederman 
9927af57294SPavel Emelyanov /*
9937af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
9947af57294SPavel Emelyanov  * from various namespaces
9957af57294SPavel Emelyanov  *
9967af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
99744c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
99844c4e1b2SEric W. Biederman  *                     current.
9997af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
10007af57294SPavel Emelyanov  *
10017af57294SPavel Emelyanov  * set_task_vxid()   : assigns a virtual id to a task;
10027af57294SPavel Emelyanov  *
10037af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
10047af57294SPavel Emelyanov  */
100552ee2dfdSOleg Nesterov pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
100652ee2dfdSOleg Nesterov 			struct pid_namespace *ns);
10077af57294SPavel Emelyanov 
1008e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
10097af57294SPavel Emelyanov {
10107af57294SPavel Emelyanov 	return tsk->pid;
10117af57294SPavel Emelyanov }
10127af57294SPavel Emelyanov 
101352ee2dfdSOleg Nesterov static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
101452ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
101552ee2dfdSOleg Nesterov {
101652ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
101752ee2dfdSOleg Nesterov }
10187af57294SPavel Emelyanov 
10197af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
10207af57294SPavel Emelyanov {
102152ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
10227af57294SPavel Emelyanov }
10237af57294SPavel Emelyanov 
10247af57294SPavel Emelyanov 
1025e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
10267af57294SPavel Emelyanov {
10277af57294SPavel Emelyanov 	return tsk->tgid;
10287af57294SPavel Emelyanov }
10297af57294SPavel Emelyanov 
10302f2a3a46SPavel Emelyanov pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
10317af57294SPavel Emelyanov 
10327af57294SPavel Emelyanov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
10337af57294SPavel Emelyanov {
10347af57294SPavel Emelyanov 	return pid_vnr(task_tgid(tsk));
10357af57294SPavel Emelyanov }
10367af57294SPavel Emelyanov 
10377af57294SPavel Emelyanov 
103880e0b6e8SRichard Guy Briggs static inline int pid_alive(const struct task_struct *p);
1039ad36d282SRichard Guy Briggs static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1040ad36d282SRichard Guy Briggs {
1041ad36d282SRichard Guy Briggs 	pid_t pid = 0;
1042ad36d282SRichard Guy Briggs 
1043ad36d282SRichard Guy Briggs 	rcu_read_lock();
1044ad36d282SRichard Guy Briggs 	if (pid_alive(tsk))
1045ad36d282SRichard Guy Briggs 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1046ad36d282SRichard Guy Briggs 	rcu_read_unlock();
1047ad36d282SRichard Guy Briggs 
1048ad36d282SRichard Guy Briggs 	return pid;
1049ad36d282SRichard Guy Briggs }
1050ad36d282SRichard Guy Briggs 
1051ad36d282SRichard Guy Briggs static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1052ad36d282SRichard Guy Briggs {
1053ad36d282SRichard Guy Briggs 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1054ad36d282SRichard Guy Briggs }
1055ad36d282SRichard Guy Briggs 
105652ee2dfdSOleg Nesterov static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
105752ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
10587af57294SPavel Emelyanov {
105952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
10607af57294SPavel Emelyanov }
10617af57294SPavel Emelyanov 
10627af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
10637af57294SPavel Emelyanov {
106452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
10657af57294SPavel Emelyanov }
10667af57294SPavel Emelyanov 
10677af57294SPavel Emelyanov 
106852ee2dfdSOleg Nesterov static inline pid_t task_session_nr_ns(struct task_struct *tsk,
106952ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
10707af57294SPavel Emelyanov {
107152ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
10727af57294SPavel Emelyanov }
10737af57294SPavel Emelyanov 
10747af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
10757af57294SPavel Emelyanov {
107652ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
10777af57294SPavel Emelyanov }
10787af57294SPavel Emelyanov 
10791b0f7ffdSOleg Nesterov /* obsolete, do not use */
10801b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
10811b0f7ffdSOleg Nesterov {
10821b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
10831b0f7ffdSOleg Nesterov }
10847af57294SPavel Emelyanov 
10851da177e4SLinus Torvalds /**
10861da177e4SLinus Torvalds  * pid_alive - check that a task structure is not stale
10871da177e4SLinus Torvalds  * @p: Task structure to be checked.
10881da177e4SLinus Torvalds  *
10891da177e4SLinus Torvalds  * Test if a process is not yet dead (at most zombie state).
10901da177e4SLinus Torvalds  * If pid_alive fails, then pointers within the task structure
10911da177e4SLinus Torvalds  * can be stale and must not be dereferenced.
1092e69f6186SYacine Belkadi  *
1093e69f6186SYacine Belkadi  * Return: 1 if the process is alive. 0 otherwise.
10941da177e4SLinus Torvalds  */
1095ad36d282SRichard Guy Briggs static inline int pid_alive(const struct task_struct *p)
10961da177e4SLinus Torvalds {
109792476d7fSEric W. Biederman 	return p->pids[PIDTYPE_PID].pid != NULL;
10981da177e4SLinus Torvalds }
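/*
 * Illustrative usage sketch (editorial addition): pid_alive() is typically
 * checked under rcu_read_lock() before following task pointers, as
 * task_ppid_nr_ns() above does:
 *
 *	rcu_read_lock();
 *	if (pid_alive(p))
 *		parent_tgid = task_tgid_nr(rcu_dereference(p->real_parent));
 *	rcu_read_unlock();
 */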
10991da177e4SLinus Torvalds 
1100f400e198SSukadev Bhattiprolu /**
1101570f5241SSergey Senozhatsky  * is_global_init - check if a task structure is init. Since init
1102570f5241SSergey Senozhatsky  * is free to have sub-threads, we need to check tgid.
11033260259fSHenne  * @tsk: Task structure to be checked.
11043260259fSHenne  *
11053260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1106e69f6186SYacine Belkadi  *
1107e69f6186SYacine Belkadi  * Return: 1 if the task structure is init. 0 otherwise.
1108f400e198SSukadev Bhattiprolu  */
1109e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1110b461cc03SPavel Emelyanov {
1111570f5241SSergey Senozhatsky 	return task_tgid_nr(tsk) == 1;
1112b461cc03SPavel Emelyanov }
1113b460cbc5SSerge E. Hallyn 
11149ec52099SCedric Le Goater extern struct pid *cad_pid;
11159ec52099SCedric Le Goater 
11161da177e4SLinus Torvalds /*
11171da177e4SLinus Torvalds  * Per-process flags
11181da177e4SLinus Torvalds  */
1119c1de45caSPeter Zijlstra #define PF_IDLE		0x00000002	/* I am an IDLE thread */
11201da177e4SLinus Torvalds #define PF_EXITING	0x00000004	/* getting shut down */
1121778e9a9cSAlexey Kuznetsov #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
112294886b84SLaurent Vivier #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
112321aa9af0STejun Heo #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
11241da177e4SLinus Torvalds #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
11254db96cf0SAndi Kleen #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
11261da177e4SLinus Torvalds #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
11271da177e4SLinus Torvalds #define PF_DUMPCORE	0x00000200	/* dumped core */
11281da177e4SLinus Torvalds #define PF_SIGNALED	0x00000400	/* killed by a signal */
11291da177e4SLinus Torvalds #define PF_MEMALLOC	0x00000800	/* Allocating memory */
113072fa5997SVasiliy Kulikov #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
11311da177e4SLinus Torvalds #define PF_USED_MATH	0x00002000	/* if unset, the FPU must be initialized before use */
1132774a1221STejun Heo #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
11331da177e4SLinus Torvalds #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
11341da177e4SLinus Torvalds #define PF_FROZEN	0x00010000	/* frozen for system suspend */
11351da177e4SLinus Torvalds #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
11361da177e4SLinus Torvalds #define PF_KSWAPD	0x00040000	/* I am kswapd */
113721caf2fcSMing Lei #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
11381da177e4SLinus Torvalds #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1139246bb0b1SOleg Nesterov #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1140b31dc66aSJens Axboe #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1141b31dc66aSJens Axboe #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
114214a40ffcSTejun Heo #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
11434db96cf0SAndi Kleen #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
114461a87122SThomas Gleixner #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
114558a69cb4STejun Heo #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
11462b44c4dbSColin Cross #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
11471da177e4SLinus Torvalds 
11481da177e4SLinus Torvalds /*
11491da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
11501da177e4SLinus Torvalds  * tasks can access tsk->flags in read-only mode, for example
11511da177e4SLinus Torvalds  * with tsk_used_math (like during threaded core dumping).
11521da177e4SLinus Torvalds  * There is, however, an exception to this rule during ptrace
11531da177e4SLinus Torvalds  * or during fork: the ptracer task is allowed to write to the
11541da177e4SLinus Torvalds  * child->flags of its traced child (same goes for fork, the parent
11551da177e4SLinus Torvalds  * can write to the child->flags), because we're guaranteed the
11561da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
11571da177e4SLinus Torvalds  * at the same time the parent does it.
11581da177e4SLinus Torvalds  */
11591da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
11601da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
11611da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
11621da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
11631da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
11641da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
11651da177e4SLinus Torvalds #define conditional_used_math(condition) \
11661da177e4SLinus Torvalds 	conditional_stopped_child_used_math(condition, current)
11671da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
11681da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
11691da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
11701da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
11711da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
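/*
 * Illustrative sketch (editorial addition): the PF_USED_MATH helpers above
 * are used, e.g., around FPU state handling.  Code restoring a stopped
 * child's math state might do something like:
 *
 *	conditional_stopped_child_used_math(new_fpu_valid, child);
 *	if (tsk_used_math(child))
 *		... copy FPU registers into the child ...
 *
 * Remember that tsk_used_math() evaluates to 0 or PF_USED_MATH, so treat
 * it as a boolean and never compare it against 1.
 */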
11721da177e4SLinus Torvalds 
11731d4457f9SKees Cook /* Per-process atomic flags. */
1174a2b86f77SZefan Li #define PFA_NO_NEW_PRIVS 0	/* May not gain new privileges. */
11752ad654bcSZefan Li #define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
11762ad654bcSZefan Li #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
117777ed2c57STetsuo Handa #define PFA_LMK_WAITING  3      /* Lowmemorykiller is waiting */
11781d4457f9SKees Cook 
11791d4457f9SKees Cook 
1180e0e5070bSZefan Li #define TASK_PFA_TEST(name, func)					\
1181e0e5070bSZefan Li 	static inline bool task_##func(struct task_struct *p)		\
1182e0e5070bSZefan Li 	{ return test_bit(PFA_##name, &p->atomic_flags); }
1183e0e5070bSZefan Li #define TASK_PFA_SET(name, func)					\
1184e0e5070bSZefan Li 	static inline void task_set_##func(struct task_struct *p)	\
1185e0e5070bSZefan Li 	{ set_bit(PFA_##name, &p->atomic_flags); }
1186e0e5070bSZefan Li #define TASK_PFA_CLEAR(name, func)					\
1187e0e5070bSZefan Li 	static inline void task_clear_##func(struct task_struct *p)	\
1188e0e5070bSZefan Li 	{ clear_bit(PFA_##name, &p->atomic_flags); }
11891d4457f9SKees Cook 
1190e0e5070bSZefan Li TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1191e0e5070bSZefan Li TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
11921d4457f9SKees Cook 
11932ad654bcSZefan Li TASK_PFA_TEST(SPREAD_PAGE, spread_page)
11942ad654bcSZefan Li TASK_PFA_SET(SPREAD_PAGE, spread_page)
11952ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
11962ad654bcSZefan Li 
11972ad654bcSZefan Li TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
11982ad654bcSZefan Li TASK_PFA_SET(SPREAD_SLAB, spread_slab)
11992ad654bcSZefan Li TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1200544b2c91STejun Heo 
120177ed2c57STetsuo Handa TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
120277ed2c57STetsuo Handa TASK_PFA_SET(LMK_WAITING, lmk_waiting)
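/*
 * Expansion sketch (editorial addition): TASK_PFA_TEST(NO_NEW_PRIVS,
 * no_new_privs) above generates, verbatim:
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * Note that NO_NEW_PRIVS deliberately gets no TASK_PFA_CLEAR() accessor:
 * once set, that flag must never be cleared.
 */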
120377ed2c57STetsuo Handa 
1204907aed48SMel Gorman static inline void tsk_restore_flags(struct task_struct *task,
1205907aed48SMel Gorman 				unsigned long orig_flags, unsigned long flags)
1206907aed48SMel Gorman {
1207907aed48SMel Gorman 	task->flags &= ~flags;
1208907aed48SMel Gorman 	task->flags |= orig_flags & flags;
1209907aed48SMel Gorman }
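/*
 * Illustrative usage sketch (editorial addition): save, set, and restore a
 * per-process flag around a scoped operation, here PF_MEMALLOC_NOIO:
 *
 *	unsigned int noio_flags = current->flags & PF_MEMALLOC_NOIO;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	... allocate without initiating I/O ...
 *	tsk_restore_flags(current, noio_flags, PF_MEMALLOC_NOIO);
 *
 * This pattern nests safely: the restore rewrites only the bits named in
 * the mask, putting back whatever value the caller originally saw.
 */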
1210907aed48SMel Gorman 
1211f82f8042SJuri Lelli extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
1212f82f8042SJuri Lelli 				     const struct cpumask *trial);
12137f51412aSJuri Lelli extern int task_can_attach(struct task_struct *p,
12147f51412aSJuri Lelli 			   const struct cpumask *cs_cpus_allowed);
12151da177e4SLinus Torvalds #ifdef CONFIG_SMP
12161e1b6c51SKOSAKI Motohiro extern void do_set_cpus_allowed(struct task_struct *p,
12171e1b6c51SKOSAKI Motohiro 			       const struct cpumask *new_mask);
12181e1b6c51SKOSAKI Motohiro 
1219cd8ba7cdSMike Travis extern int set_cpus_allowed_ptr(struct task_struct *p,
122096f874e2SRusty Russell 				const struct cpumask *new_mask);
12211da177e4SLinus Torvalds #else
12221e1b6c51SKOSAKI Motohiro static inline void do_set_cpus_allowed(struct task_struct *p,
12231e1b6c51SKOSAKI Motohiro 				      const struct cpumask *new_mask)
12241e1b6c51SKOSAKI Motohiro {
12251e1b6c51SKOSAKI Motohiro }
1226cd8ba7cdSMike Travis static inline int set_cpus_allowed_ptr(struct task_struct *p,
122796f874e2SRusty Russell 				       const struct cpumask *new_mask)
12281da177e4SLinus Torvalds {
122996f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
12301da177e4SLinus Torvalds 		return -EINVAL;
12311da177e4SLinus Torvalds 	return 0;
12321da177e4SLinus Torvalds }
12331da177e4SLinus Torvalds #endif
1234e0ad9556SRusty Russell 
12356d0d2878SChristian Borntraeger #ifndef cpu_relax_yield
12366d0d2878SChristian Borntraeger #define cpu_relax_yield() cpu_relax()
12376d0d2878SChristian Borntraeger #endif
12386d0d2878SChristian Borntraeger 
1239fa93384fSDan Carpenter extern int yield_to(struct task_struct *p, bool preempt);
124036c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
124136c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
1242d0ea0268SDongsheng Yang /**
1243d0ea0268SDongsheng Yang  * task_nice - return the nice value of a given task.
1244d0ea0268SDongsheng Yang  * @p: the task in question.
1245d0ea0268SDongsheng Yang  *
1246d0ea0268SDongsheng Yang  * Return: The nice value [ -20 ... 0 ... 19 ].
1247d0ea0268SDongsheng Yang  */
1248d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p)
1249d0ea0268SDongsheng Yang {
1250d0ea0268SDongsheng Yang 	return PRIO_TO_NICE((p)->static_prio);
1251d0ea0268SDongsheng Yang }
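/*
 * Worked example (editorial addition): PRIO_TO_NICE() from
 * <linux/sched/prio.h> maps the kernel's 0..139 priority scale onto nice
 * values, with static_prio 120 corresponding to nice 0:
 *
 *	static_prio 100  ->  nice -20	(highest non-RT priority)
 *	static_prio 120  ->  nice   0	(the default)
 *	static_prio 139  ->  nice  19	(lowest priority)
 */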
125236c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
125336c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
12541da177e4SLinus Torvalds extern int idle_cpu(int cpu);
1255fe7de49fSKOSAKI Motohiro extern int sched_setscheduler(struct task_struct *, int,
1256fe7de49fSKOSAKI Motohiro 			      const struct sched_param *);
1257961ccdddSRusty Russell extern int sched_setscheduler_nocheck(struct task_struct *, int,
1258fe7de49fSKOSAKI Motohiro 				      const struct sched_param *);
1259d50dde5aSDario Faggioli extern int sched_setattr(struct task_struct *,
1260d50dde5aSDario Faggioli 			 const struct sched_attr *);
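/*
 * Illustrative usage sketch (editorial addition): moving a kernel thread
 * onto the FIFO real-time class; priority 50 here is an arbitrary example:
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *	int ret = sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 *	if (ret)
 *		pr_warn("failed to make %s an RT task: %d\n", p->comm, ret);
 *
 * sched_setscheduler() is the same operation, but with the capability and
 * rlimit checks that apply to requests made on behalf of user space.
 */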
126136c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
1262c4f30608SPaul E. McKenney /**
1263c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
1264fa757281SRandy Dunlap  * @p: the task in question.
1265e69f6186SYacine Belkadi  *
1266e69f6186SYacine Belkadi  * Return: 1 if @p is an idle task. 0 otherwise.
1267c4f30608SPaul E. McKenney  */
12687061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
1269c4f30608SPaul E. McKenney {
1270c1de45caSPeter Zijlstra 	return !!(p->flags & PF_IDLE);
1271c4f30608SPaul E. McKenney }
127236c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
1273a458ae2eSPeter Zijlstra extern void ia64_set_curr_task(int cpu, struct task_struct *p);
12741da177e4SLinus Torvalds 
12751da177e4SLinus Torvalds void yield(void);
12761da177e4SLinus Torvalds 
12771da177e4SLinus Torvalds union thread_union {
1278c65eacbeSAndy Lutomirski #ifndef CONFIG_THREAD_INFO_IN_TASK
12791da177e4SLinus Torvalds 	struct thread_info thread_info;
1280c65eacbeSAndy Lutomirski #endif
12811da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
12821da177e4SLinus Torvalds };
12831da177e4SLinus Torvalds 
1284f3ac6067SIngo Molnar #ifdef CONFIG_THREAD_INFO_IN_TASK
1285f3ac6067SIngo Molnar static inline struct thread_info *task_thread_info(struct task_struct *task)
1286f3ac6067SIngo Molnar {
1287f3ac6067SIngo Molnar 	return &task->thread_info;
1288f3ac6067SIngo Molnar }
1289f3ac6067SIngo Molnar #elif !defined(__HAVE_THREAD_FUNCTIONS)
1290f3ac6067SIngo Molnar # define task_thread_info(task)	((struct thread_info *)(task)->stack)
1291f3ac6067SIngo Molnar #endif
1292f3ac6067SIngo Molnar 
1293198fe21bSPavel Emelyanov /*
1294198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
1295198fe21bSPavel Emelyanov  *
1296198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
1297198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
1298228ebcbeSPavel Emelyanov  * find_task_by_vpid():
1299228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
1300198fe21bSPavel Emelyanov  *
1301e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
1302198fe21bSPavel Emelyanov  */
1303198fe21bSPavel Emelyanov 
1304228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
1305228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1306228ebcbeSPavel Emelyanov 		struct pid_namespace *ns);
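/*
 * Illustrative usage sketch (editorial addition): the canonical lookup
 * pattern.  The tasklist is RCU-protected, so the caller must either stay
 * inside the RCU read-side critical section or take a reference before
 * leaving it:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *	...
 *	if (p)
 *		put_task_struct(p);
 */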
1307198fe21bSPavel Emelyanov 
1308b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1309b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
13103e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
13111da177e4SLinus Torvalds #ifdef CONFIG_SMP
13121da177e4SLinus Torvalds  extern void kick_process(struct task_struct *tsk);
13131da177e4SLinus Torvalds #else
13141da177e4SLinus Torvalds  static inline void kick_process(struct task_struct *tsk) { }
13151da177e4SLinus Torvalds #endif
13161da177e4SLinus Torvalds 
131782b89778SAdrian Hunter extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
131882b89778SAdrian Hunter static inline void set_task_comm(struct task_struct *tsk, const char *from)
131982b89778SAdrian Hunter {
132082b89778SAdrian Hunter 	__set_task_comm(tsk, from, false);
132182b89778SAdrian Hunter }
132259714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
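/*
 * Illustrative usage sketch (editorial addition): get_task_comm() copies
 * under the task lock, so prefer it to reading tsk->comm directly when the
 * target may concurrently exec.  The buffer must hold TASK_COMM_LEN bytes:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	pr_info("task %s\n", get_task_comm(comm, tsk));
 */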
13231da177e4SLinus Torvalds 
13241da177e4SLinus Torvalds #ifdef CONFIG_SMP
1325317f3941SPeter Zijlstra void scheduler_ipi(void);
132685ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
13271da177e4SLinus Torvalds #else
1328184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
132985ba2d86SRoland McGrath static inline unsigned long wait_task_inactive(struct task_struct *p,
133085ba2d86SRoland McGrath 					       long match_state)
133185ba2d86SRoland McGrath {
133285ba2d86SRoland McGrath 	return 1;
133385ba2d86SRoland McGrath }
13341da177e4SLinus Torvalds #endif
13351da177e4SLinus Torvalds 
13361da177e4SLinus Torvalds /* set thread flags in another task's structure
13371da177e4SLinus Torvalds  * - see asm/thread_info.h for the TIF_xxxx flags available
13381da177e4SLinus Torvalds  */
13391da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
13401da177e4SLinus Torvalds {
1341a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
13421da177e4SLinus Torvalds }
13431da177e4SLinus Torvalds 
13441da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
13451da177e4SLinus Torvalds {
1346a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
13471da177e4SLinus Torvalds }
13481da177e4SLinus Torvalds 
13491da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
13501da177e4SLinus Torvalds {
1351a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
13521da177e4SLinus Torvalds }
13531da177e4SLinus Torvalds 
13541da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
13551da177e4SLinus Torvalds {
1356a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
13571da177e4SLinus Torvalds }
13581da177e4SLinus Torvalds 
13591da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
13601da177e4SLinus Torvalds {
1361a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
13621da177e4SLinus Torvalds }
13631da177e4SLinus Torvalds 
13641da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
13651da177e4SLinus Torvalds {
13661da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
13671da177e4SLinus Torvalds }
13681da177e4SLinus Torvalds 
13691da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
13701da177e4SLinus Torvalds {
13711da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
13721da177e4SLinus Torvalds }
13731da177e4SLinus Torvalds 
13748ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
13758ae121acSGregory Haskins {
13768ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
13778ae121acSGregory Haskins }
13788ae121acSGregory Haskins 
13791da177e4SLinus Torvalds /*
13801da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
13811da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
13821da177e4SLinus Torvalds  * value indicates whether a reschedule was in fact done.
13831da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling,
13841da177e4SLinus Torvalds  * cond_resched_softirq() will enable bhs before scheduling.
13851da177e4SLinus Torvalds  */
138635a773a0SPeter Zijlstra #ifndef CONFIG_PREEMPT
1387c3921ab7SLinus Torvalds extern int _cond_resched(void);
138835a773a0SPeter Zijlstra #else
138935a773a0SPeter Zijlstra static inline int _cond_resched(void) { return 0; }
139035a773a0SPeter Zijlstra #endif
13916f80bd98SFrederic Weisbecker 
1392613afbf8SFrederic Weisbecker #define cond_resched() ({			\
13933427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, 0);	\
1394613afbf8SFrederic Weisbecker 	_cond_resched();			\
1395613afbf8SFrederic Weisbecker })
13966f80bd98SFrederic Weisbecker 
1397613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
1398613afbf8SFrederic Weisbecker 
1399613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
14003427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1401613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
1402613afbf8SFrederic Weisbecker })
1403613afbf8SFrederic Weisbecker 
1404613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
1405613afbf8SFrederic Weisbecker 
1406613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
14073427445aSPeter Zijlstra 	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
1408613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
1409613afbf8SFrederic Weisbecker })
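/*
 * Illustrative usage sketch (editorial addition): breaking up a long loop
 * so that, on non-preemptible kernels, other tasks still get to run:
 *
 *	list_for_each_entry(obj, &big_list, node) {
 *		process(obj);			- hypothetical helper
 *		cond_resched();
 *	}
 *
 * cond_resched_lock(&lock) is the variant for loops that hold a spinlock:
 * it drops and reacquires the lock around the reschedule.
 */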
14101da177e4SLinus Torvalds 
1411f6f3c437SSimon Horman static inline void cond_resched_rcu(void)
1412f6f3c437SSimon Horman {
1413f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1414f6f3c437SSimon Horman 	rcu_read_unlock();
1415f6f3c437SSimon Horman 	cond_resched();
1416f6f3c437SSimon Horman 	rcu_read_lock();
1417f6f3c437SSimon Horman #endif
1418f6f3c437SSimon Horman }
1419f6f3c437SSimon Horman 
14201da177e4SLinus Torvalds /*
14211da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
142295c354feSNick Piggin  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
142395c354feSNick Piggin  * but it reflects a general need for low latency.)
14241da177e4SLinus Torvalds  */
142595c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
14261da177e4SLinus Torvalds {
142795c354feSNick Piggin #ifdef CONFIG_PREEMPT
142895c354feSNick Piggin 	return spin_is_contended(lock);
142995c354feSNick Piggin #else
14301da177e4SLinus Torvalds 	return 0;
143195c354feSNick Piggin #endif
14321da177e4SLinus Torvalds }
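/*
 * Illustrative sketch (editorial addition): spin_needbreak() is what makes
 * the cond_resched_lock() pattern above cheap; a lock-holding walker can
 * also poll it directly when it wants to decide for itself when to back off:
 *
 *	spin_lock(&lock);
 *	while (work_left())  {			- hypothetical predicate
 *		do_one_unit();			- hypothetical helper
 *		if (spin_needbreak(&lock) || need_resched())
 *			break;
 *	}
 *	spin_unlock(&lock);
 */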
14331da177e4SLinus Torvalds 
143475f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
143575f93fedSPeter Zijlstra {
143675f93fedSPeter Zijlstra 	return unlikely(tif_need_resched());
143775f93fedSPeter Zijlstra }
143875f93fedSPeter Zijlstra 
1439ee761f62SThomas Gleixner /*
14401da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
14411da177e4SLinus Torvalds  */
14421da177e4SLinus Torvalds #ifdef CONFIG_SMP
14431da177e4SLinus Torvalds 
14441da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
14451da177e4SLinus Torvalds {
1446c65eacbeSAndy Lutomirski #ifdef CONFIG_THREAD_INFO_IN_TASK
1447c65eacbeSAndy Lutomirski 	return p->cpu;
1448c65eacbeSAndy Lutomirski #else
1449a1261f54SAl Viro 	return task_thread_info(p)->cpu;
1450c65eacbeSAndy Lutomirski #endif
14511da177e4SLinus Torvalds }
14521da177e4SLinus Torvalds 
1453c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
14541da177e4SLinus Torvalds 
14551da177e4SLinus Torvalds #else
14561da177e4SLinus Torvalds 
14571da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
14581da177e4SLinus Torvalds {
14591da177e4SLinus Torvalds 	return 0;
14601da177e4SLinus Torvalds }
14611da177e4SLinus Torvalds 
14621da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
14631da177e4SLinus Torvalds {
14641da177e4SLinus Torvalds }
14651da177e4SLinus Torvalds 
14661da177e4SLinus Torvalds #endif /* CONFIG_SMP */
14671da177e4SLinus Torvalds 
1468d9345c65SPan Xinhui /*
1469d9345c65SPan Xinhui  * In order to reduce various lock holder preemption latencies provide an
1470d9345c65SPan Xinhui  * interface to see if a vCPU is currently running or not.
1471d9345c65SPan Xinhui  *
1472d9345c65SPan Xinhui  * This allows us to terminate optimistic spin loops and block, analogous to
1473d9345c65SPan Xinhui  * the native optimistic spin heuristic of testing if the lock owner task is
1474d9345c65SPan Xinhui  * running or not.
1475d9345c65SPan Xinhui  */
1476d9345c65SPan Xinhui #ifndef vcpu_is_preempted
1477d9345c65SPan Xinhui # define vcpu_is_preempted(cpu)	false
1478d9345c65SPan Xinhui #endif
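/*
 * Illustrative sketch (editorial addition): how an optimistic spinner can
 * use vcpu_is_preempted() to stop wasting cycles once the lock owner's vCPU
 * has itself been scheduled out by the hypervisor, much as the mutex
 * optimistic-spin code does:
 *
 *	while (owner_running(lock, owner)) {	- hypothetical predicate
 *		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *			return false;		- give up and block instead
 *		cpu_relax();
 *	}
 */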
1479d9345c65SPan Xinhui 
148096f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
148196f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
14825c45bf27SSiddha, Suresh B 
148382455257SDave Hansen #ifndef TASK_SIZE_OF
148482455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
148582455257SDave Hansen #endif
148682455257SDave Hansen 
14871da177e4SLinus Torvalds #endif
1488