/* linux/include/linux/sched.h (xref revision 7e2703e6099609adc93679c4d45cd6247f565971) */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt_mask.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param cannot be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant to describe a so-called
 * sporadic time-constrained task. In such a model a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there is a multiplicity of scheduling algorithms
 * and policies that can be used to ensure all the tasks will make their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm
 * is available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};
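
/*
 * Illustrative only (not compiled here): a minimal userspace sketch of
 * installing SCHED_DEADLINE parameters with the raw sched_setattr()
 * syscall, which has no glibc wrapper. It assumes __NR_sched_setattr is
 * defined by the toolchain headers; struct dl_attr below is a local
 * mirror of the sched_attr layout above, and the 10ms/100ms budget is
 * an arbitrary example.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

struct dl_attr {			/* userspace copy of struct sched_attr */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int make_me_deadline(void)
{
	struct dl_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);		/* == SCHED_ATTR_SIZE_VER0 (48) */
	attr.sched_policy   = 6;		/* SCHED_DEADLINE */
	attr.sched_runtime  =  10 * 1000 * 1000;	/* 10ms of budget... */
	attr.sched_deadline = 100 * 1000 * 1000;	/* ...due within 100ms... */
	attr.sched_period   = 100 * 1000 * 1000;	/* ...every 100ms */

	return syscall(__NR_sched_setattr, 0 /* self */, &attr, 0 /* flags */);
}
#endif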

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
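
/*
 * Illustrative only (not compiled here): a standalone sketch of one
 * CALC_LOAD step for the 1-minute average. With FIXED_1 == 2048 and
 * EXP_1 == 1884, each 5-second tick keeps ~92% of the old average and
 * blends in ~8% of the current run-queue length. E.g. old load 1.00
 * (2048) with two runnable tasks (4096) gives
 * (2048*1884 + 4096*164) >> 11 == 2212, i.e. about 1.08.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long load = 2048;		/* 1.00 in fixed-point */
	unsigned long n = 2 * 2048;		/* two runnable tasks */

	load = (load * 1884 + n * (2048 - 1884)) >> 11;
	printf("loadavg ~ %lu.%02lu\n", load >> 11,
	       (load & 2047) * 100 / 2048);	/* prints "loadavg ~ 1.08" */
	return 0;
}
#endif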

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_STATE_MAX		1024

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

/* Compile-time assert: one state character per state bit (the array size
 * goes negative and breaks the build on a mismatch). */
extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
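
/*
 * A sketch of the canonical wait loop built on the above (my_cond and
 * the wakeup side are hypothetical). Setting the state *before*
 * re-checking the condition means a concurrent wake_up() cannot be
 * lost between the check and the schedule() call:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (my_cond)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */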

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

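/*
 * A sketch of typical use of the schedule_timeout() family (illustrative;
 * msecs_to_jiffies() is from <linux/jiffies.h>, included above). The
 * _interruptible/_killable/_uninterruptible variants set the task state
 * before calling schedule_timeout(), so callers don't have to:
 *
 *	static void wait_about_100ms(void)
 *	{
 *		schedule_timeout_uninterruptible(msecs_to_jiffies(100));
 *	}
 */
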
struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
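
/*
 * Illustrative only (not compiled here): assuming
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y, MMF_DUMP_FILTER_DEFAULT sets
 * bits 2, 3, 6 and 7 of mm->flags. The /proc/<pid>/coredump_filter
 * interface shows the mask shifted down by MMF_DUMP_FILTER_SHIFT (2),
 * which yields the familiar default of 0x33:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long flags = (1UL << 2) |	/* MMF_DUMP_ANON_PRIVATE */
			      (1UL << 3) |	/* MMF_DUMP_ANON_SHARED */
			      (1UL << 6) |	/* MMF_DUMP_ELF_HEADERS */
			      (1UL << 7);	/* MMF_DUMP_HUGETLB_PRIVATE */

	printf("coredump_filter = 0x%lx\n", flags >> 2);	/* 0x33 */
	return 0;
}
#endif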
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
struct cputime {
	cputime_t utime;
	cputime_t stime;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
#else
#define PREEMPT_DISABLED	PREEMPT_ENABLED
#endif

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to prevent cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	unsigned audit_tty_log_passwd;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, more specifically, setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path.  Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	oom_flags_t oom_flags;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add the following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
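
/*
 * A sketch of the allocation side of the trailing span[] array
 * (illustrative, assuming <linux/slab.h> for kzalloc(); the real
 * topology code lives in kernel/sched/): extra space for one cpumask
 * is attached past the end of the structure, so sched_domain_span()
 * above always indexes valid storage.
 *
 *	struct sched_domain *sd = kzalloc(sizeof(struct sched_domain) +
 *					  cpumask_size(), GFP_KERNEL);
 */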
954758b2cdcSRusty Russell 
955acc3f5d7SRusty Russell extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
9561d3504fcSHidetoshi Seto 				    struct sched_domain_attr *dattr_new);
957029190c5SPaul Jackson 
958acc3f5d7SRusty Russell /* Allocate an array of sched domains, for partition_sched_domains(). */
959acc3f5d7SRusty Russell cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
960acc3f5d7SRusty Russell void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
961acc3f5d7SRusty Russell 
96239be3501SPeter Zijlstra bool cpus_share_cache(int this_cpu, int that_cpu);
96339be3501SPeter Zijlstra 
9641b427c15SIngo Molnar #else /* CONFIG_SMP */
9651da177e4SLinus Torvalds 
9661b427c15SIngo Molnar struct sched_domain_attr;
9671b427c15SIngo Molnar 
9681b427c15SIngo Molnar static inline void
969acc3f5d7SRusty Russell partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
9701b427c15SIngo Molnar 			struct sched_domain_attr *dattr_new)
971d02c7a8cSCon Kolivas {
972d02c7a8cSCon Kolivas }
97339be3501SPeter Zijlstra 
97439be3501SPeter Zijlstra static inline bool cpus_share_cache(int this_cpu, int that_cpu)
97539be3501SPeter Zijlstra {
97639be3501SPeter Zijlstra 	return true;
97739be3501SPeter Zijlstra }
97839be3501SPeter Zijlstra 
9791b427c15SIngo Molnar #endif	/* !CONFIG_SMP */
9801da177e4SLinus Torvalds 
98147fe38fcSPeter Zijlstra 
9821da177e4SLinus Torvalds struct io_context;			/* See blkdev.h */
9831da177e4SLinus Torvalds 
9841da177e4SLinus Torvalds 
985383f2835SChen, Kenneth W #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
98636c8b586SIngo Molnar extern void prefetch_stack(struct task_struct *t);
987383f2835SChen, Kenneth W #else
988383f2835SChen, Kenneth W static inline void prefetch_stack(struct task_struct *t) { }
989383f2835SChen, Kenneth W #endif
9901da177e4SLinus Torvalds 
9911da177e4SLinus Torvalds struct audit_context;		/* See audit.c */
9921da177e4SLinus Torvalds struct mempolicy;
993b92ce558SJens Axboe struct pipe_inode_info;
9944865ecf1SSerge E. Hallyn struct uts_namespace;
9951da177e4SLinus Torvalds 
99620b8a59fSIngo Molnar struct load_weight {
9979dbdb155SPeter Zijlstra 	unsigned long weight;
9989dbdb155SPeter Zijlstra 	u32 inv_weight;
99920b8a59fSIngo Molnar };
100020b8a59fSIngo Molnar 
10019d85f21cSPaul Turner struct sched_avg {
10029d85f21cSPaul Turner 	/*
10039d85f21cSPaul Turner 	 * These sums represent an infinite geometric series and so are bound
1004239003eaSKamalesh Babulal 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
10059d85f21cSPaul Turner 	 * choices of y < 1-2^(-32)*1024.
10069d85f21cSPaul Turner 	 */
10079d85f21cSPaul Turner 	u32 runnable_avg_sum, runnable_avg_period;
10089d85f21cSPaul Turner 	u64 last_runnable_update;
10099ee474f5SPaul Turner 	s64 decay_count;
10102dac754eSPaul Turner 	unsigned long load_avg_contrib;
10119d85f21cSPaul Turner };
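
/*
 * Worked example of the bound above, assuming a contribution of at most
 * 1024 per period, decayed by y each period:
 *
 *	1024 * (1 + y + y^2 + ...) = 1024 / (1 - y)
 *
 * This stays below 2^32 (and so fits a u32) whenever
 * 1 - y > 1024/2^32 = 2^-22, i.e. for all y < 1 - 2^(-32)*1024.
 */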
10129d85f21cSPaul Turner 
101394c18227SIngo Molnar #ifdef CONFIG_SCHEDSTATS
101441acab88SLucas De Marchi struct sched_statistics {
101594c18227SIngo Molnar 	u64			wait_start;
101694c18227SIngo Molnar 	u64			wait_max;
10176d082592SArjan van de Ven 	u64			wait_count;
10186d082592SArjan van de Ven 	u64			wait_sum;
10198f0dfc34SArjan van de Ven 	u64			iowait_count;
10208f0dfc34SArjan van de Ven 	u64			iowait_sum;
102194c18227SIngo Molnar 
102294c18227SIngo Molnar 	u64			sleep_start;
102320b8a59fSIngo Molnar 	u64			sleep_max;
102494c18227SIngo Molnar 	s64			sum_sleep_runtime;
102594c18227SIngo Molnar 
102694c18227SIngo Molnar 	u64			block_start;
102720b8a59fSIngo Molnar 	u64			block_max;
102820b8a59fSIngo Molnar 	u64			exec_max;
1029eba1ed4bSIngo Molnar 	u64			slice_max;
1030cc367732SIngo Molnar 
1031cc367732SIngo Molnar 	u64			nr_migrations_cold;
1032cc367732SIngo Molnar 	u64			nr_failed_migrations_affine;
1033cc367732SIngo Molnar 	u64			nr_failed_migrations_running;
1034cc367732SIngo Molnar 	u64			nr_failed_migrations_hot;
1035cc367732SIngo Molnar 	u64			nr_forced_migrations;
1036cc367732SIngo Molnar 
1037cc367732SIngo Molnar 	u64			nr_wakeups;
1038cc367732SIngo Molnar 	u64			nr_wakeups_sync;
1039cc367732SIngo Molnar 	u64			nr_wakeups_migrate;
1040cc367732SIngo Molnar 	u64			nr_wakeups_local;
1041cc367732SIngo Molnar 	u64			nr_wakeups_remote;
1042cc367732SIngo Molnar 	u64			nr_wakeups_affine;
1043cc367732SIngo Molnar 	u64			nr_wakeups_affine_attempts;
1044cc367732SIngo Molnar 	u64			nr_wakeups_passive;
1045cc367732SIngo Molnar 	u64			nr_wakeups_idle;
104641acab88SLucas De Marchi };
104741acab88SLucas De Marchi #endif
104841acab88SLucas De Marchi 
104941acab88SLucas De Marchi struct sched_entity {
105041acab88SLucas De Marchi 	struct load_weight	load;		/* for load-balancing */
105141acab88SLucas De Marchi 	struct rb_node		run_node;
105241acab88SLucas De Marchi 	struct list_head	group_node;
105341acab88SLucas De Marchi 	unsigned int		on_rq;
105441acab88SLucas De Marchi 
105541acab88SLucas De Marchi 	u64			exec_start;
105641acab88SLucas De Marchi 	u64			sum_exec_runtime;
105741acab88SLucas De Marchi 	u64			vruntime;
105841acab88SLucas De Marchi 	u64			prev_sum_exec_runtime;
105941acab88SLucas De Marchi 
106041acab88SLucas De Marchi 	u64			nr_migrations;
106141acab88SLucas De Marchi 
106241acab88SLucas De Marchi #ifdef CONFIG_SCHEDSTATS
106341acab88SLucas De Marchi 	struct sched_statistics statistics;
106494c18227SIngo Molnar #endif
106594c18227SIngo Molnar 
106620b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
106720b8a59fSIngo Molnar 	struct sched_entity	*parent;
106820b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
106920b8a59fSIngo Molnar 	struct cfs_rq		*cfs_rq;
107020b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
107120b8a59fSIngo Molnar 	struct cfs_rq		*my_q;
107220b8a59fSIngo Molnar #endif
10738bd75c77SClark Williams 
1074141965c7SAlex Shi #ifdef CONFIG_SMP
1075f4e26b12SPaul Turner 	/* Per-entity load-tracking */
10769d85f21cSPaul Turner 	struct sched_avg	avg;
10779d85f21cSPaul Turner #endif
107820b8a59fSIngo Molnar };
107970b97a7fSIngo Molnar 
1080fa717060SPeter Zijlstra struct sched_rt_entity {
1081fa717060SPeter Zijlstra 	struct list_head run_list;
108278f2c7dbSPeter Zijlstra 	unsigned long timeout;
108357d2aa00SYing Xue 	unsigned long watchdog_stamp;
1084bee367edSRichard Kennedy 	unsigned int time_slice;
10856f505b16SPeter Zijlstra 
108658d6c2d7SPeter Zijlstra 	struct sched_rt_entity *back;
1087052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
10886f505b16SPeter Zijlstra 	struct sched_rt_entity	*parent;
10896f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
10906f505b16SPeter Zijlstra 	struct rt_rq		*rt_rq;
10916f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
10926f505b16SPeter Zijlstra 	struct rt_rq		*my_q;
10936f505b16SPeter Zijlstra #endif
1094fa717060SPeter Zijlstra };
1095fa717060SPeter Zijlstra 
1096aab03e05SDario Faggioli struct sched_dl_entity {
1097aab03e05SDario Faggioli 	struct rb_node	rb_node;
1098aab03e05SDario Faggioli 
1099aab03e05SDario Faggioli 	/*
1100aab03e05SDario Faggioli 	 * Original scheduling parameters. Copied here from sched_attr
1101aab03e05SDario Faggioli 	 * during sched_setscheduler2(), they will remain the same until
1102aab03e05SDario Faggioli 	 * the next sched_setscheduler2().
1103aab03e05SDario Faggioli 	 */
1104aab03e05SDario Faggioli 	u64 dl_runtime;		/* maximum runtime for each instance	*/
1105aab03e05SDario Faggioli 	u64 dl_deadline;	/* relative deadline of each instance	*/
1106755378a4SHarald Gustafsson 	u64 dl_period;		/* separation of two instances (period) */
1107332ac17eSDario Faggioli 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1108aab03e05SDario Faggioli 
1109aab03e05SDario Faggioli 	/*
1110aab03e05SDario Faggioli 	 * Actual scheduling parameters. Initialized with the values above,
1111aab03e05SDario Faggioli 	 * they are continuously updated during task execution. Note that
1112aab03e05SDario Faggioli 	 * the remaining runtime could be < 0 in case we are in overrun.
1113aab03e05SDario Faggioli 	 */
1114aab03e05SDario Faggioli 	s64 runtime;		/* remaining runtime for this instance	*/
1115aab03e05SDario Faggioli 	u64 deadline;		/* absolute deadline for this instance	*/
1116aab03e05SDario Faggioli 	unsigned int flags;	/* specifying the scheduler behaviour	*/
1117aab03e05SDario Faggioli 
1118aab03e05SDario Faggioli 	/*
1119aab03e05SDario Faggioli 	 * Some bool flags:
1120aab03e05SDario Faggioli 	 *
1121aab03e05SDario Faggioli 	 * @dl_throttled tells if we exhausted the runtime. If so, the
1122aab03e05SDario Faggioli 	 * task has to wait for a replenishment to be performed at the
1123aab03e05SDario Faggioli 	 * next firing of dl_timer.
1124aab03e05SDario Faggioli 	 *
1125aab03e05SDario Faggioli 	 * @dl_new tells if a new instance arrived. If so we must
1126aab03e05SDario Faggioli 	 * start executing it with full runtime and reset its absolute
1127aab03e05SDario Faggioli 	 * deadline;
11282d3d891dSDario Faggioli 	 *
11292d3d891dSDario Faggioli 	 * @dl_boosted tells if we are boosted due to DI (deadline
11302d3d891dSDario Faggioli 	 * inheritance). If so we are outside the bandwidth enforcement
11312d3d891dSDario Faggioli 	 * mechanism (but only until we exit the critical section).
1132aab03e05SDario Faggioli 	 */
11332d3d891dSDario Faggioli 	int dl_throttled, dl_new, dl_boosted;
1134aab03e05SDario Faggioli 
1135aab03e05SDario Faggioli 	/*
1136aab03e05SDario Faggioli 	 * Bandwidth enforcement timer. Each -deadline task has its
1137aab03e05SDario Faggioli 	 * own bandwidth to be enforced, thus we need one timer per task.
1138aab03e05SDario Faggioli 	 */
1139aab03e05SDario Faggioli 	struct hrtimer dl_timer;
1140aab03e05SDario Faggioli };
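
/*
 * Illustrative sketch, not part of this header: dl_bw above is the
 * runtime/deadline ratio kept in fixed point.  A minimal version of that
 * computation, assuming the 20-bit fractional shift used by the deadline
 * scheduler's to_ratio() helper and <linux/math64.h> for div64_u64():
 */
static inline u64 dl_bw_example(u64 dl_runtime, u64 dl_deadline)
{
	if (!dl_deadline)
		return 0;

	/* (runtime / deadline) scaled by 2^20 to keep integer math */
	return div64_u64(dl_runtime << 20, dl_deadline);
}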
11418bd75c77SClark Williams 
114286848966SPaul E. McKenney struct rcu_node;
114386848966SPaul E. McKenney 
11448dc85d54SPeter Zijlstra enum perf_event_task_context {
11458dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
11468dc85d54SPeter Zijlstra 	perf_hw_context = 0,
114789a1e187SPeter Zijlstra 	perf_sw_context,
11488dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
11498dc85d54SPeter Zijlstra };
11508dc85d54SPeter Zijlstra 
11511da177e4SLinus Torvalds struct task_struct {
11521da177e4SLinus Torvalds 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1153f7e4217bSRoman Zippel 	void *stack;
11541da177e4SLinus Torvalds 	atomic_t usage;
115597dc32cdSWilliam Cohen 	unsigned int flags;	/* per process flags, defined below */
115697dc32cdSWilliam Cohen 	unsigned int ptrace;
11571da177e4SLinus Torvalds 
11582dd73a4fSPeter Williams #ifdef CONFIG_SMP
1159fa14ff4aSPeter Zijlstra 	struct llist_node wake_entry;
11603ca7a440SPeter Zijlstra 	int on_cpu;
116162470419SMichael Wang 	struct task_struct *last_wakee;
116262470419SMichael Wang 	unsigned long wakee_flips;
116362470419SMichael Wang 	unsigned long wakee_flip_decay_ts;
1164ac66f547SPeter Zijlstra 
1165ac66f547SPeter Zijlstra 	int wake_cpu;
11664866cde0SNick Piggin #endif
1167fd2f4419SPeter Zijlstra 	int on_rq;
116850e645a8SIngo Molnar 
1169b29739f9SIngo Molnar 	int prio, static_prio, normal_prio;
1170c7aceabaSRichard Kennedy 	unsigned int rt_priority;
11715522d5d5SIngo Molnar 	const struct sched_class *sched_class;
117220b8a59fSIngo Molnar 	struct sched_entity se;
1173fa717060SPeter Zijlstra 	struct sched_rt_entity rt;
11748323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
11758323f26cSPeter Zijlstra 	struct task_group *sched_task_group;
11768323f26cSPeter Zijlstra #endif
1177aab03e05SDario Faggioli 	struct sched_dl_entity dl;
11781da177e4SLinus Torvalds 
1179e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
1180e107be36SAvi Kivity 	/* list of struct preempt_notifier: */
1181e107be36SAvi Kivity 	struct hlist_head preempt_notifiers;
1182e107be36SAvi Kivity #endif
1183e107be36SAvi Kivity 
11846c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
11852056a782SJens Axboe 	unsigned int btrace_seq;
11866c5c9341SAlexey Dobriyan #endif
11871da177e4SLinus Torvalds 
118897dc32cdSWilliam Cohen 	unsigned int policy;
118929baa747SPeter Zijlstra 	int nr_cpus_allowed;
11901da177e4SLinus Torvalds 	cpumask_t cpus_allowed;
11911da177e4SLinus Torvalds 
1192a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1193e260be67SPaul E. McKenney 	int rcu_read_lock_nesting;
1194f41d911fSPaul E. McKenney 	char rcu_read_unlock_special;
1195f41d911fSPaul E. McKenney 	struct list_head rcu_node_entry;
1196a57eb940SPaul E. McKenney #endif /* #ifdef CONFIG_PREEMPT_RCU */
1197a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1198a57eb940SPaul E. McKenney 	struct rcu_node *rcu_blocked_node;
1199f41d911fSPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
120024278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
120124278d14SPaul E. McKenney 	struct rt_mutex *rcu_boost_mutex;
120224278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1203e260be67SPaul E. McKenney 
120452f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
12051da177e4SLinus Torvalds 	struct sched_info sched_info;
12061da177e4SLinus Torvalds #endif
12071da177e4SLinus Torvalds 
12081da177e4SLinus Torvalds 	struct list_head tasks;
1209806c09a7SDario Faggioli #ifdef CONFIG_SMP
1210917b627dSGregory Haskins 	struct plist_node pushable_tasks;
12111baca4ceSJuri Lelli 	struct rb_node pushable_dl_tasks;
1212806c09a7SDario Faggioli #endif
12131da177e4SLinus Torvalds 
12141da177e4SLinus Torvalds 	struct mm_struct *mm, *active_mm;
12154471a675SJiri Kosina #ifdef CONFIG_COMPAT_BRK
12164471a675SJiri Kosina 	unsigned brk_randomized:1;
12174471a675SJiri Kosina #endif
121834e55232SKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
121934e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat	rss_stat;
122034e55232SKAMEZAWA Hiroyuki #endif
12211da177e4SLinus Torvalds /* task state */
122297dc32cdSWilliam Cohen 	int exit_state;
12231da177e4SLinus Torvalds 	int exit_code, exit_signal;
12241da177e4SLinus Torvalds 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1225a8f072c1STejun Heo 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
12269b89f6baSAndrei Epure 
12279b89f6baSAndrei Epure 	/* Used for emulating ABI behavior of previous Linux versions */
122897dc32cdSWilliam Cohen 	unsigned int personality;
12299b89f6baSAndrei Epure 
12301da177e4SLinus Torvalds 	unsigned did_exec:1;
1231f9ce1f1cSKentaro Takeda 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1232f9ce1f1cSKentaro Takeda 				 * execve */
12338f0dfc34SArjan van de Ven 	unsigned in_iowait:1;
12348f0dfc34SArjan van de Ven 
1235259e5e6cSAndy Lutomirski 	/* task may not gain privileges */
1236259e5e6cSAndy Lutomirski 	unsigned no_new_privs:1;
1237ca94c442SLennart Poettering 
1238ca94c442SLennart Poettering 	/* Revert to default priority/policy when forking */
1239ca94c442SLennart Poettering 	unsigned sched_reset_on_fork:1;
1240a8e4f2eaSPeter Zijlstra 	unsigned sched_contributes_to_load:1;
1241ca94c442SLennart Poettering 
12421da177e4SLinus Torvalds 	pid_t pid;
12431da177e4SLinus Torvalds 	pid_t tgid;
12440a425405SArjan van de Ven 
12451314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
12460a425405SArjan van de Ven 	/* Canary value for the -fstack-protector gcc feature */
12470a425405SArjan van de Ven 	unsigned long stack_canary;
12481314562aSHiroshi Shimamoto #endif
12491da177e4SLinus Torvalds 	/*
12501da177e4SLinus Torvalds 	 * pointers to (original) parent process, youngest child, younger sibling,
12511da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
1252f470021aSRoland McGrath 	 * p->real_parent->pid)
12531da177e4SLinus Torvalds 	 */
1254abd63bc3SKees Cook 	struct task_struct __rcu *real_parent; /* real parent process */
1255abd63bc3SKees Cook 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
12561da177e4SLinus Torvalds 	/*
1257f470021aSRoland McGrath 	 * children/sibling forms the list of my natural children
12581da177e4SLinus Torvalds 	 */
12591da177e4SLinus Torvalds 	struct list_head children;	/* list of my children */
12601da177e4SLinus Torvalds 	struct list_head sibling;	/* linkage in my parent's children list */
12611da177e4SLinus Torvalds 	struct task_struct *group_leader;	/* threadgroup leader */
12621da177e4SLinus Torvalds 
1263f470021aSRoland McGrath 	/*
1264f470021aSRoland McGrath 	 * ptraced is the list of tasks this task is using ptrace on.
1265f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
1266f470021aSRoland McGrath 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1267f470021aSRoland McGrath 	 */
1268f470021aSRoland McGrath 	struct list_head ptraced;
1269f470021aSRoland McGrath 	struct list_head ptrace_entry;
1270f470021aSRoland McGrath 
12711da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
127292476d7fSEric W. Biederman 	struct pid_link pids[PIDTYPE_MAX];
127347e65328SOleg Nesterov 	struct list_head thread_group;
12741da177e4SLinus Torvalds 
12751da177e4SLinus Torvalds 	struct completion *vfork_done;		/* for vfork() */
12761da177e4SLinus Torvalds 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
12771da177e4SLinus Torvalds 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
12781da177e4SLinus Torvalds 
1279c66f08beSMichael Neuling 	cputime_t utime, stime, utimescaled, stimescaled;
12809ac52315SLaurent Vivier 	cputime_t gtime;
12819fbc42eaSFrederic Weisbecker #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1282d37f761dSFrederic Weisbecker 	struct cputime prev_cputime;
1283d99ca3b9SHidetoshi Seto #endif
12846a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
12856a61671bSFrederic Weisbecker 	seqlock_t vtime_seqlock;
12866a61671bSFrederic Weisbecker 	unsigned long long vtime_snap;
12876a61671bSFrederic Weisbecker 	enum {
12886a61671bSFrederic Weisbecker 		VTIME_SLEEPING = 0,
12896a61671bSFrederic Weisbecker 		VTIME_USER,
12906a61671bSFrederic Weisbecker 		VTIME_SYS,
12916a61671bSFrederic Weisbecker 	} vtime_snap_whence;
12926a61671bSFrederic Weisbecker #endif
12931da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw; /* context switch counts */
1294924b42d5STomas Janousek 	struct timespec start_time; 		/* monotonic time */
1295924b42d5STomas Janousek 	struct timespec real_start_time;	/* boot based time */
12961da177e4SLinus Torvalds /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
12971da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt;
12981da177e4SLinus Torvalds 
1299f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
13001da177e4SLinus Torvalds 	struct list_head cpu_timers[3];
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds /* process credentials */
13031b0ba1c9SArnd Bergmann 	const struct cred __rcu *real_cred; /* objective and real subjective task
13043b11a1deSDavid Howells 					 * credentials (COW) */
13051b0ba1c9SArnd Bergmann 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
13063b11a1deSDavid Howells 					 * credentials (COW) */
130736772092SPaolo 'Blaisorblade' Giarrusso 	char comm[TASK_COMM_LEN]; /* executable name excluding path
130836772092SPaolo 'Blaisorblade' Giarrusso 				     - access with [gs]et_task_comm (which lock
130936772092SPaolo 'Blaisorblade' Giarrusso 				       it with task_lock())
1310221af7f8SLinus Torvalds 				     - initialized normally by setup_new_exec */
13111da177e4SLinus Torvalds /* file system info */
13121da177e4SLinus Torvalds 	int link_count, total_link_count;
13133d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
13141da177e4SLinus Torvalds /* ipc stuff */
13151da177e4SLinus Torvalds 	struct sysv_sem sysvsem;
13163d5b6fccSAlexey Dobriyan #endif
1317e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
131882a1fcb9SIngo Molnar /* hung task detection */
131982a1fcb9SIngo Molnar 	unsigned long last_switch_count;
132082a1fcb9SIngo Molnar #endif
13211da177e4SLinus Torvalds /* CPU-specific state of this task */
13221da177e4SLinus Torvalds 	struct thread_struct thread;
13231da177e4SLinus Torvalds /* filesystem information */
13241da177e4SLinus Torvalds 	struct fs_struct *fs;
13251da177e4SLinus Torvalds /* open file information */
13261da177e4SLinus Torvalds 	struct files_struct *files;
13271651e14eSSerge E. Hallyn /* namespaces */
1328ab516013SSerge E. Hallyn 	struct nsproxy *nsproxy;
13291da177e4SLinus Torvalds /* signal handlers */
13301da177e4SLinus Torvalds 	struct signal_struct *signal;
13311da177e4SLinus Torvalds 	struct sighand_struct *sighand;
13321da177e4SLinus Torvalds 
13331da177e4SLinus Torvalds 	sigset_t blocked, real_blocked;
1334f3de272bSRoland McGrath 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
13351da177e4SLinus Torvalds 	struct sigpending pending;
13361da177e4SLinus Torvalds 
13371da177e4SLinus Torvalds 	unsigned long sas_ss_sp;
13381da177e4SLinus Torvalds 	size_t sas_ss_size;
13391da177e4SLinus Torvalds 	int (*notifier)(void *priv);
13401da177e4SLinus Torvalds 	void *notifier_data;
13411da177e4SLinus Torvalds 	sigset_t *notifier_mask;
134267d12145SAl Viro 	struct callback_head *task_works;
1343e73f8959SOleg Nesterov 
13441da177e4SLinus Torvalds 	struct audit_context *audit_context;
1345bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
1346e1760bd5SEric W. Biederman 	kuid_t loginuid;
13474746ec5bSEric Paris 	unsigned int sessionid;
1348bfef93a5SAl Viro #endif
1349932ecebbSWill Drewry 	struct seccomp seccomp;
13501da177e4SLinus Torvalds 
13511da177e4SLinus Torvalds /* Thread group tracking */
13521da177e4SLinus Torvalds    	u32 parent_exec_id;
13531da177e4SLinus Torvalds    	u32 self_exec_id;
135458568d2aSMiao Xie /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
135558568d2aSMiao Xie  * mempolicy */
13561da177e4SLinus Torvalds 	spinlock_t alloc_lock;
13571da177e4SLinus Torvalds 
1358b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
13591d615482SThomas Gleixner 	raw_spinlock_t pi_lock;
1360b29739f9SIngo Molnar 
136123f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
136223f78d4aSIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task */
1363fb00aca4SPeter Zijlstra 	struct rb_root pi_waiters;
1364fb00aca4SPeter Zijlstra 	struct rb_node *pi_waiters_leftmost;
136523f78d4aSIngo Molnar 	/* Deadlock detection and priority inheritance handling */
136623f78d4aSIngo Molnar 	struct rt_mutex_waiter *pi_blocked_on;
13672d3d891dSDario Faggioli 	/* Top pi_waiters task */
13682d3d891dSDario Faggioli 	struct task_struct *pi_top_task;
136923f78d4aSIngo Molnar #endif
137023f78d4aSIngo Molnar 
1371408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
1372408894eeSIngo Molnar 	/* mutex deadlock detection */
1373408894eeSIngo Molnar 	struct mutex_waiter *blocked_on;
1374408894eeSIngo Molnar #endif
1375de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
1376de30a2b3SIngo Molnar 	unsigned int irq_events;
1377de30a2b3SIngo Molnar 	unsigned long hardirq_enable_ip;
1378de30a2b3SIngo Molnar 	unsigned long hardirq_disable_ip;
1379fa1452e8SHiroshi Shimamoto 	unsigned int hardirq_enable_event;
1380de30a2b3SIngo Molnar 	unsigned int hardirq_disable_event;
1381fa1452e8SHiroshi Shimamoto 	int hardirqs_enabled;
1382de30a2b3SIngo Molnar 	int hardirq_context;
1383fa1452e8SHiroshi Shimamoto 	unsigned long softirq_disable_ip;
1384fa1452e8SHiroshi Shimamoto 	unsigned long softirq_enable_ip;
1385fa1452e8SHiroshi Shimamoto 	unsigned int softirq_disable_event;
1386fa1452e8SHiroshi Shimamoto 	unsigned int softirq_enable_event;
1387fa1452e8SHiroshi Shimamoto 	int softirqs_enabled;
1388de30a2b3SIngo Molnar 	int softirq_context;
1389de30a2b3SIngo Molnar #endif
1390fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
1391bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
1392fbb9ce95SIngo Molnar 	u64 curr_chain_key;
1393fbb9ce95SIngo Molnar 	int lockdep_depth;
1394fbb9ce95SIngo Molnar 	unsigned int lockdep_recursion;
1395c7aceabaSRichard Kennedy 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1396cf40bd16SNick Piggin 	gfp_t lockdep_reclaim_gfp;
1397fbb9ce95SIngo Molnar #endif
1398408894eeSIngo Molnar 
13991da177e4SLinus Torvalds /* journalling filesystem info */
14001da177e4SLinus Torvalds 	void *journal_info;
14011da177e4SLinus Torvalds 
1402d89d8796SNeil Brown /* stacked block device info */
1403bddd87c7SAkinobu Mita 	struct bio_list *bio_list;
1404d89d8796SNeil Brown 
140573c10101SJens Axboe #ifdef CONFIG_BLOCK
140673c10101SJens Axboe /* stack plugging */
140773c10101SJens Axboe 	struct blk_plug *plug;
140873c10101SJens Axboe #endif
140973c10101SJens Axboe 
14101da177e4SLinus Torvalds /* VM state */
14111da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state;
14121da177e4SLinus Torvalds 
14131da177e4SLinus Torvalds 	struct backing_dev_info *backing_dev_info;
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 	struct io_context *io_context;
14161da177e4SLinus Torvalds 
14171da177e4SLinus Torvalds 	unsigned long ptrace_message;
14181da177e4SLinus Torvalds 	siginfo_t *last_siginfo; /* For ptrace use.  */
14197c3ab738SAndrew Morton 	struct task_io_accounting ioac;
14208f0ab514SJay Lan #if defined(CONFIG_TASK_XACCT)
14211da177e4SLinus Torvalds 	u64 acct_rss_mem1;	/* accumulated rss usage */
14221da177e4SLinus Torvalds 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
142349b5cf34SJonathan Lim 	cputime_t acct_timexpd;	/* stime + utime since last update */
14241da177e4SLinus Torvalds #endif
14251da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
142658568d2aSMiao Xie 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1427cc9a6c87SMel Gorman 	seqcount_t mems_allowed_seq;	/* Sequence no. to catch updates */
1428825a46afSPaul Jackson 	int cpuset_mem_spread_rotor;
14296adef3ebSJack Steiner 	int cpuset_slab_spread_rotor;
14301da177e4SLinus Torvalds #endif
1431ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
1432817929ecSPaul Menage 	/* Control Group info protected by css_set_lock */
14332c392b8cSArnd Bergmann 	struct css_set __rcu *cgroups;
1434817929ecSPaul Menage 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1435817929ecSPaul Menage 	struct list_head cg_list;
1436ddbcc7e8SPaul Menage #endif
143742b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
14380771dfefSIngo Molnar 	struct robust_list_head __user *robust_list;
143934f192c6SIngo Molnar #ifdef CONFIG_COMPAT
144034f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
144134f192c6SIngo Molnar #endif
1442c87e2837SIngo Molnar 	struct list_head pi_state_list;
1443c87e2837SIngo Molnar 	struct futex_pi_state *pi_state_cache;
144442b2dd0aSAlexey Dobriyan #endif
1445cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
14468dc85d54SPeter Zijlstra 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1447cdd6c482SIngo Molnar 	struct mutex perf_event_mutex;
1448cdd6c482SIngo Molnar 	struct list_head perf_event_list;
1449a63eaf34SPaul Mackerras #endif
1450c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
145158568d2aSMiao Xie 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1452c7aceabaSRichard Kennedy 	short il_next;
1453207205a2SEric Dumazet 	short pref_node_fork;
1454c7aceabaSRichard Kennedy #endif
1455cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
1456cbee9f88SPeter Zijlstra 	int numa_scan_seq;
1457cbee9f88SPeter Zijlstra 	unsigned int numa_scan_period;
1458598f0ec0SMel Gorman 	unsigned int numa_scan_period_max;
1459de1c9ce6SRik van Riel 	int numa_preferred_nid;
14606b9a7460SMel Gorman 	unsigned long numa_migrate_retry;
1461cbee9f88SPeter Zijlstra 	u64 node_stamp;			/* migration stamp  */
1462*7e2703e6SRik van Riel 	u64 last_task_numa_placement;
1463*7e2703e6SRik van Riel 	u64 last_sum_exec_runtime;
1464cbee9f88SPeter Zijlstra 	struct callback_head numa_work;
1465f809ca9aSMel Gorman 
14668c8a743cSPeter Zijlstra 	struct list_head numa_entry;
14678c8a743cSPeter Zijlstra 	struct numa_group *numa_group;
14688c8a743cSPeter Zijlstra 
1469745d6147SMel Gorman 	/*
1470745d6147SMel Gorman 	 * Exponentially decaying average of faults on a per-node basis.
1471745d6147SMel Gorman 	 * Scheduling placement decisions are made based on these counts.
1472745d6147SMel Gorman 	 * The values remain static for the duration of a PTE scan.
1473745d6147SMel Gorman 	 */
1474ff1df896SRik van Riel 	unsigned long *numa_faults_memory;
147583e1d2cdSMel Gorman 	unsigned long total_numa_faults;
1476745d6147SMel Gorman 
1477745d6147SMel Gorman 	/*
1478745d6147SMel Gorman 	 * numa_faults_buffer_memory records faults per node during the
1479ff1df896SRik van Riel 	 * current scan window. When the scan completes, the counts in
1480ff1df896SRik van Riel 	 * numa_faults_memory decay and these values are copied.
1481745d6147SMel Gorman 	 */
1482ff1df896SRik van Riel 	unsigned long *numa_faults_buffer_memory;
1483745d6147SMel Gorman 
148404bb2f94SRik van Riel 	/*
148550ec8a40SRik van Riel 	 * Track the nodes the process was running on when a NUMA hinting
148650ec8a40SRik van Riel 	 * fault was incurred.
148750ec8a40SRik van Riel 	 */
148850ec8a40SRik van Riel 	unsigned long *numa_faults_cpu;
148950ec8a40SRik van Riel 	unsigned long *numa_faults_buffer_cpu;
149050ec8a40SRik van Riel 
149150ec8a40SRik van Riel 	/*
149204bb2f94SRik van Riel 	 * numa_faults_locality tracks if faults recorded during the last
149304bb2f94SRik van Riel 	 * scan window were remote/local. The task scan period is adapted
149404bb2f94SRik van Riel 	 * based on the locality of the faults with different weights
149504bb2f94SRik van Riel 	 * depending on whether they were shared or private faults
149604bb2f94SRik van Riel 	 */
149704bb2f94SRik van Riel 	unsigned long numa_faults_locality[2];
149804bb2f94SRik van Riel 
1499b32e86b4SIngo Molnar 	unsigned long numa_pages_migrated;
1500cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
1501cbee9f88SPeter Zijlstra 
1502e56d0903SIngo Molnar 	struct rcu_head rcu;
1503b92ce558SJens Axboe 
1504b92ce558SJens Axboe 	/*
1505b92ce558SJens Axboe 	 * cache last used pipe for splice
1506b92ce558SJens Axboe 	 */
1507b92ce558SJens Axboe 	struct pipe_inode_info *splice_pipe;
15085640f768SEric Dumazet 
15095640f768SEric Dumazet 	struct page_frag task_frag;
15105640f768SEric Dumazet 
1511ca74e92bSShailabh Nagar #ifdef	CONFIG_TASK_DELAY_ACCT
1512ca74e92bSShailabh Nagar 	struct task_delay_info *delays;
1513ca74e92bSShailabh Nagar #endif
1514f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1515f4f154fdSAkinobu Mita 	int make_it_fail;
1516f4f154fdSAkinobu Mita #endif
15179d823e8fSWu Fengguang 	/*
15189d823e8fSWu Fengguang 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
15199d823e8fSWu Fengguang 	 * balance_dirty_pages() for some dirty throttling pause
15209d823e8fSWu Fengguang 	 * balance_dirty_pages() for a dirty throttling pause.
15219d823e8fSWu Fengguang 	int nr_dirtied;
15229d823e8fSWu Fengguang 	int nr_dirtied_pause;
152383712358SWu Fengguang 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
15249d823e8fSWu Fengguang 
15259745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
15269745512cSArjan van de Ven 	int latency_record_count;
15279745512cSArjan van de Ven 	struct latency_record latency_record[LT_SAVECOUNT];
15289745512cSArjan van de Ven #endif
15296976675dSArjan van de Ven 	/*
15306976675dSArjan van de Ven 	 * time slack values; these are used to round up poll() and
15316976675dSArjan van de Ven 	 * select() etc. timeout values. These are in nanoseconds.
15326976675dSArjan van de Ven 	 */
15336976675dSArjan van de Ven 	unsigned long timer_slack_ns;
15346976675dSArjan van de Ven 	unsigned long default_timer_slack_ns;
1535f8d570a4SDavid Miller 
1536fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
15373ad2f3fbSDaniel Mack 	/* Index of current stored address in ret_stack */
1538f201ae23SFrederic Weisbecker 	int curr_ret_stack;
1539f201ae23SFrederic Weisbecker 	/* Stack of return addresses for return function tracing */
1540f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack	*ret_stack;
15418aef2d28SSteven Rostedt 	/* time stamp for last schedule */
15428aef2d28SSteven Rostedt 	unsigned long long ftrace_timestamp;
1543f201ae23SFrederic Weisbecker 	/*
1544f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
1545f201ae23SFrederic Weisbecker 	 * because of depth overrun.
1546f201ae23SFrederic Weisbecker 	 */
1547f201ae23SFrederic Weisbecker 	atomic_t trace_overrun;
1548380c4b14SFrederic Weisbecker 	/* Pause for the tracing */
1549380c4b14SFrederic Weisbecker 	atomic_t tracing_graph_pause;
1550f201ae23SFrederic Weisbecker #endif
1551ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1552ea4e2bc4SSteven Rostedt 	/* state flags for use by tracers */
1553ea4e2bc4SSteven Rostedt 	unsigned long trace;
1554b1cff0adSSteven Rostedt 	/* bitmask and counter of trace recursion */
1555261842b7SSteven Rostedt 	unsigned long trace_recursion;
1556261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
1557c255a458SAndrew Morton #ifdef CONFIG_MEMCG /* memcg uses this to batch uncharge operations */
1558569b846dSKAMEZAWA Hiroyuki 	struct memcg_batch_info {
1559569b846dSKAMEZAWA Hiroyuki 		int do_batch;	/* incremented when batch uncharge started */
1560569b846dSKAMEZAWA Hiroyuki 		struct mem_cgroup *memcg; /* target memcg of uncharge */
15617ffd4ca7SJohannes Weiner 		unsigned long nr_pages;	/* uncharged usage */
15627ffd4ca7SJohannes Weiner 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1563569b846dSKAMEZAWA Hiroyuki 	} memcg_batch;
15640e9d92f2SGlauber Costa 	unsigned int memcg_kmem_skip_account;
1565519e5247SJohannes Weiner 	struct memcg_oom_info {
156649426420SJohannes Weiner 		struct mem_cgroup *memcg;
156749426420SJohannes Weiner 		gfp_t gfp_mask;
156849426420SJohannes Weiner 		int order;
1569519e5247SJohannes Weiner 		unsigned int may_oom:1;
1570519e5247SJohannes Weiner 	} memcg_oom;
1571569b846dSKAMEZAWA Hiroyuki #endif
15720326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
15730326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
15740326f5a9SSrikar Dronamraju #endif
1575cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1576cafe5635SKent Overstreet 	unsigned int	sequential_io;
1577cafe5635SKent Overstreet 	unsigned int	sequential_io_avg;
1578cafe5635SKent Overstreet #endif
15791da177e4SLinus Torvalds };
15801da177e4SLinus Torvalds 
158176e6eee0SRusty Russell /* Future-safe accessor for struct task_struct's cpus_allowed. */
1582a4636818SRusty Russell #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
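
/*
 * Illustrative sketch, not part of this header: go through the accessor
 * above instead of touching ->cpus_allowed directly, so callers keep
 * working if the storage ever changes.  The function name is hypothetical.
 */
static inline int first_allowed_cpu_example(struct task_struct *p)
{
	/* cpumask_first() returns >= nr_cpu_ids if the mask is empty */
	return cpumask_first(tsk_cpus_allowed(p));
}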
158376e6eee0SRusty Russell 
15846688cc05SPeter Zijlstra #define TNF_MIGRATED	0x01
15856688cc05SPeter Zijlstra #define TNF_NO_GROUP	0x02
1586dabe1d99SRik van Riel #define TNF_SHARED	0x04
158704bb2f94SRik van Riel #define TNF_FAULT_LOCAL	0x08
15886688cc05SPeter Zijlstra 
1589cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
15906688cc05SPeter Zijlstra extern void task_numa_fault(int last_node, int node, int pages, int flags);
1591e29cf08bSMel Gorman extern pid_t task_numa_group_id(struct task_struct *p);
15921a687c2eSMel Gorman extern void set_numabalancing_state(bool enabled);
159382727018SRik van Riel extern void task_numa_free(struct task_struct *p);
159410f39042SRik van Riel extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
159510f39042SRik van Riel 					int src_nid, int dst_cpu);
1596cbee9f88SPeter Zijlstra #else
1597ac8e895bSMel Gorman static inline void task_numa_fault(int last_node, int node, int pages,
15986688cc05SPeter Zijlstra 				   int flags)
1599cbee9f88SPeter Zijlstra {
1600cbee9f88SPeter Zijlstra }
1601e29cf08bSMel Gorman static inline pid_t task_numa_group_id(struct task_struct *p)
1602e29cf08bSMel Gorman {
1603e29cf08bSMel Gorman 	return 0;
1604e29cf08bSMel Gorman }
16051a687c2eSMel Gorman static inline void set_numabalancing_state(bool enabled)
16061a687c2eSMel Gorman {
16071a687c2eSMel Gorman }
160882727018SRik van Riel static inline void task_numa_free(struct task_struct *p)
160982727018SRik van Riel {
161082727018SRik van Riel }
161110f39042SRik van Riel static inline bool should_numa_migrate_memory(struct task_struct *p,
161210f39042SRik van Riel 				struct page *page, int src_nid, int dst_cpu)
161310f39042SRik van Riel {
161410f39042SRik van Riel 	return true;
161510f39042SRik van Riel }
1616cbee9f88SPeter Zijlstra #endif
1617cbee9f88SPeter Zijlstra 
1618e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
161922c935f4SEric W. Biederman {
162022c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
162122c935f4SEric W. Biederman }
162222c935f4SEric W. Biederman 
1623e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
162422c935f4SEric W. Biederman {
162522c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
162622c935f4SEric W. Biederman }
162722c935f4SEric W. Biederman 
16286dda81f4SOleg Nesterov /*
16296dda81f4SOleg Nesterov  * Without tasklist or rcu lock it is not safe to dereference
16306dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current;
16316dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
16326dda81f4SOleg Nesterov  */
1633e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
163422c935f4SEric W. Biederman {
163522c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
163622c935f4SEric W. Biederman }
163722c935f4SEric W. Biederman 
1638e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
163922c935f4SEric W. Biederman {
164022c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
164122c935f4SEric W. Biederman }
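
/*
 * Illustrative sketch, not part of this header: per the comment above,
 * a caller without tasklist_lock must wrap the accessors in
 * rcu_read_lock() so the struct pid cannot be freed underneath it.
 */
static inline pid_t task_pgrp_nr_example(struct task_struct *task)
{
	pid_t nr;

	rcu_read_lock();
	nr = pid_nr(task_pgrp(task));
	rcu_read_unlock();

	return nr;
}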
164222c935f4SEric W. Biederman 
16437af57294SPavel Emelyanov struct pid_namespace;
16447af57294SPavel Emelyanov 
16457af57294SPavel Emelyanov /*
16467af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
16477af57294SPavel Emelyanov  * from various namespaces
16487af57294SPavel Emelyanov  *
16497af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
165044c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
165144c4e1b2SEric W. Biederman  *                     current.
16527af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
16537af57294SPavel Emelyanov  *
16547af57294SPavel Emelyanov  * set_task_vxid()   : assigns a virtual id to a task;
16557af57294SPavel Emelyanov  *
16567af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
16577af57294SPavel Emelyanov  */
165852ee2dfdSOleg Nesterov pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
165952ee2dfdSOleg Nesterov 			struct pid_namespace *ns);
16607af57294SPavel Emelyanov 
1661e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
16627af57294SPavel Emelyanov {
16637af57294SPavel Emelyanov 	return tsk->pid;
16647af57294SPavel Emelyanov }
16657af57294SPavel Emelyanov 
166652ee2dfdSOleg Nesterov static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
166752ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
166852ee2dfdSOleg Nesterov {
166952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
167052ee2dfdSOleg Nesterov }
16717af57294SPavel Emelyanov 
16727af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
16737af57294SPavel Emelyanov {
167452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
16757af57294SPavel Emelyanov }
16767af57294SPavel Emelyanov 
16777af57294SPavel Emelyanov 
1678e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
16797af57294SPavel Emelyanov {
16807af57294SPavel Emelyanov 	return tsk->tgid;
16817af57294SPavel Emelyanov }
16827af57294SPavel Emelyanov 
16832f2a3a46SPavel Emelyanov pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
16847af57294SPavel Emelyanov 
16857af57294SPavel Emelyanov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
16867af57294SPavel Emelyanov {
16877af57294SPavel Emelyanov 	return pid_vnr(task_tgid(tsk));
16887af57294SPavel Emelyanov }
16897af57294SPavel Emelyanov 
16907af57294SPavel Emelyanov 
169152ee2dfdSOleg Nesterov static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
169252ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
16937af57294SPavel Emelyanov {
169452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
16957af57294SPavel Emelyanov }
16967af57294SPavel Emelyanov 
16977af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
16987af57294SPavel Emelyanov {
169952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
17007af57294SPavel Emelyanov }
17017af57294SPavel Emelyanov 
17027af57294SPavel Emelyanov 
170352ee2dfdSOleg Nesterov static inline pid_t task_session_nr_ns(struct task_struct *tsk,
170452ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
17057af57294SPavel Emelyanov {
170652ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
17077af57294SPavel Emelyanov }
17087af57294SPavel Emelyanov 
17097af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
17107af57294SPavel Emelyanov {
171152ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
17127af57294SPavel Emelyanov }
17137af57294SPavel Emelyanov 
17141b0f7ffdSOleg Nesterov /* obsolete, do not use */
17151b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
17161b0f7ffdSOleg Nesterov {
17171b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
17181b0f7ffdSOleg Nesterov }
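
/*
 * Illustrative sketch, not part of this header: the same task has
 * different pid values depending on the observer's namespace.  Assumes
 * the caller holds a reference on @tsk; the function name is hypothetical.
 */
static inline void pid_view_example(struct task_struct *tsk)
{
	pid_t global = task_pid_nr(tsk);	/* as seen from the init ns */
	pid_t local  = task_pid_vnr(tsk);	/* as seen from current's ns */

	pr_debug("global pid %d, namespace-local pid %d\n", global, local);
}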
17197af57294SPavel Emelyanov 
17201da177e4SLinus Torvalds /**
17211da177e4SLinus Torvalds  * pid_alive - check that a task structure is not stale
17221da177e4SLinus Torvalds  * @p: Task structure to be checked.
17231da177e4SLinus Torvalds  *
17241da177e4SLinus Torvalds  * Test if a process is not yet dead (at most zombie state).
17251da177e4SLinus Torvalds  * If pid_alive fails, then pointers within the task structure
17261da177e4SLinus Torvalds  * can be stale and must not be dereferenced.
1727e69f6186SYacine Belkadi  *
1728e69f6186SYacine Belkadi  * Return: 1 if the process is alive. 0 otherwise.
17291da177e4SLinus Torvalds  */
1730e868171aSAlexey Dobriyan static inline int pid_alive(struct task_struct *p)
17311da177e4SLinus Torvalds {
173292476d7fSEric W. Biederman 	return p->pids[PIDTYPE_PID].pid != NULL;
17331da177e4SLinus Torvalds }
17341da177e4SLinus Torvalds 
1735f400e198SSukadev Bhattiprolu /**
1736b460cbc5SSerge E. Hallyn  * is_global_init - check if a task structure is init
17373260259fSHenne  * @tsk: Task structure to be checked.
17383260259fSHenne  *
17393260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1740e69f6186SYacine Belkadi  *
1741e69f6186SYacine Belkadi  * Return: 1 if the task structure is init. 0 otherwise.
1742f400e198SSukadev Bhattiprolu  */
1743e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1744b461cc03SPavel Emelyanov {
1745b461cc03SPavel Emelyanov 	return tsk->pid == 1;
1746b461cc03SPavel Emelyanov }
1747b460cbc5SSerge E. Hallyn 
17489ec52099SCedric Le Goater extern struct pid *cad_pid;
17499ec52099SCedric Le Goater 
17501da177e4SLinus Torvalds extern void free_task(struct task_struct *tsk);
17511da177e4SLinus Torvalds #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1752e56d0903SIngo Molnar 
1753158d9ebdSAndrew Morton extern void __put_task_struct(struct task_struct *t);
1754e56d0903SIngo Molnar 
1755e56d0903SIngo Molnar static inline void put_task_struct(struct task_struct *t)
1756e56d0903SIngo Molnar {
1757e56d0903SIngo Molnar 	if (atomic_dec_and_test(&t->usage))
17588c7904a0SEric W. Biederman 		__put_task_struct(t);
1759e56d0903SIngo Molnar }
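
/*
 * Illustrative sketch, not part of this header: the canonical pattern for
 * keeping a task pinned across a sleep or lock drop.
 */
static inline void task_ref_example(struct task_struct *t)
{
	get_task_struct(t);	/* t cannot be freed while we hold the ref */
	/* ... sleep, drop locks, dereference t safely ... */
	put_task_struct(t);	/* may free t if this was the last reference */
}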
17601da177e4SLinus Torvalds 
17616a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
17626a61671bSFrederic Weisbecker extern void task_cputime(struct task_struct *t,
17636a61671bSFrederic Weisbecker 			 cputime_t *utime, cputime_t *stime);
17646a61671bSFrederic Weisbecker extern void task_cputime_scaled(struct task_struct *t,
17656a61671bSFrederic Weisbecker 				cputime_t *utimescaled, cputime_t *stimescaled);
17666a61671bSFrederic Weisbecker extern cputime_t task_gtime(struct task_struct *t);
17676a61671bSFrederic Weisbecker #else
17686fac4829SFrederic Weisbecker static inline void task_cputime(struct task_struct *t,
17696fac4829SFrederic Weisbecker 				cputime_t *utime, cputime_t *stime)
17706fac4829SFrederic Weisbecker {
17716fac4829SFrederic Weisbecker 	if (utime)
17726fac4829SFrederic Weisbecker 		*utime = t->utime;
17736fac4829SFrederic Weisbecker 	if (stime)
17746fac4829SFrederic Weisbecker 		*stime = t->stime;
17756fac4829SFrederic Weisbecker }
17766fac4829SFrederic Weisbecker 
17776fac4829SFrederic Weisbecker static inline void task_cputime_scaled(struct task_struct *t,
17786fac4829SFrederic Weisbecker 				       cputime_t *utimescaled,
17796fac4829SFrederic Weisbecker 				       cputime_t *stimescaled)
17806fac4829SFrederic Weisbecker {
17816fac4829SFrederic Weisbecker 	if (utimescaled)
17826fac4829SFrederic Weisbecker 		*utimescaled = t->utimescaled;
17836fac4829SFrederic Weisbecker 	if (stimescaled)
17846fac4829SFrederic Weisbecker 		*stimescaled = t->stimescaled;
17856fac4829SFrederic Weisbecker }
17866a61671bSFrederic Weisbecker 
17876a61671bSFrederic Weisbecker static inline cputime_t task_gtime(struct task_struct *t)
17886a61671bSFrederic Weisbecker {
17896a61671bSFrederic Weisbecker 	return t->gtime;
17906a61671bSFrederic Weisbecker }
17916a61671bSFrederic Weisbecker #endif
1792e80d0a1aSFrederic Weisbecker extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1793e80d0a1aSFrederic Weisbecker extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
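
/*
 * Illustrative sketch, not part of this header: read a task's times
 * through the accessor rather than the raw fields, so the
 * VIRT_CPU_ACCOUNTING_GEN variant above is handled transparently.
 */
static inline cputime_t task_total_time_example(struct task_struct *t)
{
	cputime_t utime, stime;

	task_cputime(t, &utime, &stime);
	return utime + stime;
}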
179449048622SBalbir Singh 
17951da177e4SLinus Torvalds /*
17961da177e4SLinus Torvalds  * Per process flags
17971da177e4SLinus Torvalds  */
17981da177e4SLinus Torvalds #define PF_EXITING	0x00000004	/* getting shut down */
1799778e9a9cSAlexey Kuznetsov #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
180094886b84SLaurent Vivier #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
180121aa9af0STejun Heo #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
18021da177e4SLinus Torvalds #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
18034db96cf0SAndi Kleen #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
18041da177e4SLinus Torvalds #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
18051da177e4SLinus Torvalds #define PF_DUMPCORE	0x00000200	/* dumped core */
18061da177e4SLinus Torvalds #define PF_SIGNALED	0x00000400	/* killed by a signal */
18071da177e4SLinus Torvalds #define PF_MEMALLOC	0x00000800	/* Allocating memory */
180872fa5997SVasiliy Kulikov #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
18091da177e4SLinus Torvalds #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1810774a1221STejun Heo #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
18111da177e4SLinus Torvalds #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
18121da177e4SLinus Torvalds #define PF_FROZEN	0x00010000	/* frozen for system suspend */
18131da177e4SLinus Torvalds #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
18141da177e4SLinus Torvalds #define PF_KSWAPD	0x00040000	/* I am kswapd */
181521caf2fcSMing Lei #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
18161da177e4SLinus Torvalds #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1817246bb0b1SOleg Nesterov #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1818b31dc66aSJens Axboe #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1819b31dc66aSJens Axboe #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1820b31dc66aSJens Axboe #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1821b31dc66aSJens Axboe #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
182214a40ffcSTejun Heo #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
18234db96cf0SAndi Kleen #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1824c61afb18SPaul Jackson #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
182561a87122SThomas Gleixner #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
182658a69cb4STejun Heo #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
18272b44c4dbSColin Cross #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
18281da177e4SLinus Torvalds 
18291da177e4SLinus Torvalds /*
18301da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
18311da177e4SLinus Torvalds  * tasks can access tsk->flags in readonly mode, for example
18321da177e4SLinus Torvalds  * with tsk_used_math() (as during threaded core dumping).
18331da177e4SLinus Torvalds  * There is, however, an exception to this rule during ptrace
18341da177e4SLinus Torvalds  * or during fork: the ptracer task is allowed to write to the
18351da177e4SLinus Torvalds  * child->flags of its traced child (same goes for fork, the parent
18361da177e4SLinus Torvalds  * can write to the child->flags), because we're guaranteed the
18371da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
18381da177e4SLinus Torvalds  * at the same time the parent does it.
18391da177e4SLinus Torvalds  */
18401da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
18411da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
18421da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
18431da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
18441da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
18451da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
18461da177e4SLinus Torvalds #define conditional_used_math(condition) \
18471da177e4SLinus Torvalds 	conditional_stopped_child_used_math(condition, current)
18481da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
18491da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
18501da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
18511da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
18521da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
18531da177e4SLinus Torvalds 
185421caf2fcSMing Lei /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
185521caf2fcSMing Lei static inline gfp_t memalloc_noio_flags(gfp_t flags)
185621caf2fcSMing Lei {
185721caf2fcSMing Lei 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
185821caf2fcSMing Lei 		flags &= ~__GFP_IO;
185921caf2fcSMing Lei 	return flags;
186021caf2fcSMing Lei }
186121caf2fcSMing Lei 
186221caf2fcSMing Lei static inline unsigned int memalloc_noio_save(void)
186321caf2fcSMing Lei {
186421caf2fcSMing Lei 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
186521caf2fcSMing Lei 	current->flags |= PF_MEMALLOC_NOIO;
186621caf2fcSMing Lei 	return flags;
186721caf2fcSMing Lei }
186821caf2fcSMing Lei 
186921caf2fcSMing Lei static inline void memalloc_noio_restore(unsigned int flags)
187021caf2fcSMing Lei {
187121caf2fcSMing Lei 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
187221caf2fcSMing Lei }
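
/*
 * Illustrative sketch, not part of this header: the intended save/restore
 * pattern around an allocation that must not recurse into I/O.  Assumes
 * <linux/slab.h> for kmalloc(); the allocator is expected to strip
 * __GFP_IO for this task via memalloc_noio_flags().
 */
static inline void *noio_alloc_example(size_t size)
{
	unsigned int noio_flags;
	void *p;

	noio_flags = memalloc_noio_save();	/* sets PF_MEMALLOC_NOIO */
	p = kmalloc(size, GFP_KERNEL);
	memalloc_noio_restore(noio_flags);	/* put the flag back as it was */

	return p;
}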
187321caf2fcSMing Lei 
1874e5c1902eSTejun Heo /*
1875a8f072c1STejun Heo  * task->jobctl flags
1876e5c1902eSTejun Heo  */
1877a8f072c1STejun Heo #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1878e5c1902eSTejun Heo 
1879a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1880a8f072c1STejun Heo #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1881a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
188273ddff2bSTejun Heo #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1883fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1884a8f072c1STejun Heo #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1885544b2c91STejun Heo #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1886a8f072c1STejun Heo 
1887a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1888a8f072c1STejun Heo #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1889a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
189073ddff2bSTejun Heo #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1891fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1892a8f072c1STejun Heo #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1893544b2c91STejun Heo #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1894a8f072c1STejun Heo 
1895fb1d910cSTejun Heo #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
189673ddff2bSTejun Heo #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
18973759a0d9STejun Heo 
18987dd3db54STejun Heo extern bool task_set_jobctl_pending(struct task_struct *task,
18997dd3db54STejun Heo 				    unsigned int mask);
190073ddff2bSTejun Heo extern void task_clear_jobctl_trapping(struct task_struct *task);
19013759a0d9STejun Heo extern void task_clear_jobctl_pending(struct task_struct *task,
19023759a0d9STejun Heo 				      unsigned int mask);
190339efa3efSTejun Heo 
1904a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1905f41d911fSPaul E. McKenney 
1906f41d911fSPaul E. McKenney #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
19071aa03f11SPaul E. McKenney #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1908f41d911fSPaul E. McKenney 
1909f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1910f41d911fSPaul E. McKenney {
1911f41d911fSPaul E. McKenney 	p->rcu_read_lock_nesting = 0;
1912f41d911fSPaul E. McKenney 	p->rcu_read_unlock_special = 0;
1913a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1914dd5d19baSPaul E. McKenney 	p->rcu_blocked_node = NULL;
191524278d14SPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
191624278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
191724278d14SPaul E. McKenney 	p->rcu_boost_mutex = NULL;
191824278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1919f41d911fSPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_node_entry);
1920f41d911fSPaul E. McKenney }
1921f41d911fSPaul E. McKenney 
1922f41d911fSPaul E. McKenney #else
1923f41d911fSPaul E. McKenney 
1924f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1925f41d911fSPaul E. McKenney {
1926f41d911fSPaul E. McKenney }
1927f41d911fSPaul E. McKenney 
1928f41d911fSPaul E. McKenney #endif
1929f41d911fSPaul E. McKenney 
1930907aed48SMel Gorman static inline void tsk_restore_flags(struct task_struct *task,
1931907aed48SMel Gorman 				unsigned long orig_flags, unsigned long flags)
1932907aed48SMel Gorman {
1933907aed48SMel Gorman 	task->flags &= ~flags;
1934907aed48SMel Gorman 	task->flags |= orig_flags & flags;
1935907aed48SMel Gorman }
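
/*
 * Illustrative sketch, not part of this header: tsk_restore_flags() lets a
 * caller set a flag temporarily without clobbering a bit that may already
 * have been set, e.g. PF_MEMALLOC around a reclaim-critical region.
 */
static inline void pf_memalloc_region_example(void)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;
	/* ... allocation that must not recurse into reclaim ... */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
}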
1936907aed48SMel Gorman 
19371da177e4SLinus Torvalds #ifdef CONFIG_SMP
19381e1b6c51SKOSAKI Motohiro extern void do_set_cpus_allowed(struct task_struct *p,
19391e1b6c51SKOSAKI Motohiro 			       const struct cpumask *new_mask);
19401e1b6c51SKOSAKI Motohiro 
1941cd8ba7cdSMike Travis extern int set_cpus_allowed_ptr(struct task_struct *p,
194296f874e2SRusty Russell 				const struct cpumask *new_mask);
19431da177e4SLinus Torvalds #else
19441e1b6c51SKOSAKI Motohiro static inline void do_set_cpus_allowed(struct task_struct *p,
19451e1b6c51SKOSAKI Motohiro 				      const struct cpumask *new_mask)
19461e1b6c51SKOSAKI Motohiro {
19471e1b6c51SKOSAKI Motohiro }
1948cd8ba7cdSMike Travis static inline int set_cpus_allowed_ptr(struct task_struct *p,
194996f874e2SRusty Russell 				       const struct cpumask *new_mask)
19501da177e4SLinus Torvalds {
195196f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
19521da177e4SLinus Torvalds 		return -EINVAL;
19531da177e4SLinus Torvalds 	return 0;
19541da177e4SLinus Torvalds }
19551da177e4SLinus Torvalds #endif
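
/*
 * Illustrative sketch, not part of this header: pinning a task to a single
 * CPU through the cpumask-pointer interface, which works with both the SMP
 * and UP definitions above.
 */
static inline int pin_task_to_cpu_example(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}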
1956e0ad9556SRusty Russell 
19573451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
19585167e8d5SPeter Zijlstra void calc_load_enter_idle(void);
19595167e8d5SPeter Zijlstra void calc_load_exit_idle(void);
19605167e8d5SPeter Zijlstra #else
19615167e8d5SPeter Zijlstra static inline void calc_load_enter_idle(void) { }
19625167e8d5SPeter Zijlstra static inline void calc_load_exit_idle(void) { }
19633451d024SFrederic Weisbecker #endif /* CONFIG_NO_HZ_COMMON */
19645167e8d5SPeter Zijlstra 
1965e0ad9556SRusty Russell #ifndef CONFIG_CPUMASK_OFFSTACK
1966cd8ba7cdSMike Travis static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1967cd8ba7cdSMike Travis {
1968cd8ba7cdSMike Travis 	return set_cpus_allowed_ptr(p, &new_mask);
1969cd8ba7cdSMike Travis }
1970e0ad9556SRusty Russell #endif
19711da177e4SLinus Torvalds 
1972b342501cSIngo Molnar /*
1973c676329aSPeter Zijlstra  * Do not use outside of architecture code which knows its limitations.
1974c676329aSPeter Zijlstra  *
1975c676329aSPeter Zijlstra  * sched_clock() has no promise of monotonicity or bounded drift between
1976c676329aSPeter Zijlstra  * CPUs; using it (which you should not) requires disabling IRQs.
1977c676329aSPeter Zijlstra  *
1978c676329aSPeter Zijlstra  * Please use one of the three interfaces below.
1979b342501cSIngo Molnar  */
19801bbfa6f2SMike Frysinger extern unsigned long long notrace sched_clock(void);
1981c676329aSPeter Zijlstra /*
1982489a71b0SHiroshi Shimamoto  * See the comment in kernel/sched/clock.c
1983c676329aSPeter Zijlstra  */
1984c676329aSPeter Zijlstra extern u64 cpu_clock(int cpu);
1985c676329aSPeter Zijlstra extern u64 local_clock(void);
1986c676329aSPeter Zijlstra extern u64 sched_clock_cpu(int cpu);
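
/*
 * Illustrative sketch, not part of this header: timing a short section with
 * local_clock(), one of the interfaces recommended above.  Both reads must
 * happen on the same CPU for the delta to be meaningful.
 */
static inline u64 time_section_example(void (*fn)(void))
{
	u64 t0 = local_clock();

	fn();
	return local_clock() - t0;	/* nanoseconds, same-CPU delta */
}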
1987c676329aSPeter Zijlstra 
1988e436d800SIngo Molnar 
1989c1955a3dSPeter Zijlstra extern void sched_clock_init(void);
1990c1955a3dSPeter Zijlstra 
19913e51f33fSPeter Zijlstra #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
19923e51f33fSPeter Zijlstra static inline void sched_clock_tick(void)
19933e51f33fSPeter Zijlstra {
19943e51f33fSPeter Zijlstra }
19953e51f33fSPeter Zijlstra 
19963e51f33fSPeter Zijlstra static inline void sched_clock_idle_sleep_event(void)
19973e51f33fSPeter Zijlstra {
19983e51f33fSPeter Zijlstra }
19993e51f33fSPeter Zijlstra 
20003e51f33fSPeter Zijlstra static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
20013e51f33fSPeter Zijlstra {
20023e51f33fSPeter Zijlstra }
20033e51f33fSPeter Zijlstra #else
2004c676329aSPeter Zijlstra /*
2005c676329aSPeter Zijlstra  * An architecture can select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2006c676329aSPeter Zijlstra  * in its arch Kconfig and then discover during bootup that
2007c676329aSPeter Zijlstra  * sched_clock() is reliable after all; these helpers track
2008c676329aSPeter Zijlstra  * that state:
2009c676329aSPeter Zijlstra  */
201035af99e6SPeter Zijlstra extern int sched_clock_stable(void);
201135af99e6SPeter Zijlstra extern void set_sched_clock_stable(void);
201235af99e6SPeter Zijlstra extern void clear_sched_clock_stable(void);
2013c676329aSPeter Zijlstra 
20143e51f33fSPeter Zijlstra extern void sched_clock_tick(void);
20153e51f33fSPeter Zijlstra extern void sched_clock_idle_sleep_event(void);
20163e51f33fSPeter Zijlstra extern void sched_clock_idle_wakeup_event(u64 delta_ns);
20173e51f33fSPeter Zijlstra #endif
20183e51f33fSPeter Zijlstra 
2019b52bfee4SVenkatesh Pallipadi #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2020b52bfee4SVenkatesh Pallipadi /*
2021b52bfee4SVenkatesh Pallipadi  * An interface for runtime opt-in to IRQ time accounting, based on
2022b52bfee4SVenkatesh Pallipadi  * sched_clock. The opt-in is explicit so that systems with slow
2023b52bfee4SVenkatesh Pallipadi  * sched_clock implementations do not pay a performance penalty.
2024b52bfee4SVenkatesh Pallipadi  */
2025b52bfee4SVenkatesh Pallipadi extern void enable_sched_clock_irqtime(void);
2026b52bfee4SVenkatesh Pallipadi extern void disable_sched_clock_irqtime(void);
2027b52bfee4SVenkatesh Pallipadi #else
2028b52bfee4SVenkatesh Pallipadi static inline void enable_sched_clock_irqtime(void) {}
2029b52bfee4SVenkatesh Pallipadi static inline void disable_sched_clock_irqtime(void) {}
2030b52bfee4SVenkatesh Pallipadi #endif
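/*
 * A sketch, under stated assumptions (an arch init path and a fast,
 * stable sched_clock), of how an architecture might opt in:
 *
 *	static int __init myarch_init_irqtime(void)
 *	{
 *		if (sched_clock_stable())
 *			enable_sched_clock_irqtime();
 *		return 0;
 *	}
 *	early_initcall(myarch_init_irqtime);
 */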
2031b52bfee4SVenkatesh Pallipadi 
203236c8b586SIngo Molnar extern unsigned long long
203341b86e9cSIngo Molnar task_sched_runtime(struct task_struct *task);
20341da177e4SLinus Torvalds 
20351da177e4SLinus Torvalds /* sched_exec is called by processes performing an exec */
20361da177e4SLinus Torvalds #ifdef CONFIG_SMP
20371da177e4SLinus Torvalds extern void sched_exec(void);
20381da177e4SLinus Torvalds #else
20391da177e4SLinus Torvalds #define sched_exec()   {}
20401da177e4SLinus Torvalds #endif
20411da177e4SLinus Torvalds 
20422aa44d05SIngo Molnar extern void sched_clock_idle_sleep_event(void);
20432aa44d05SIngo Molnar extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2044bb29ab26SIngo Molnar 
20451da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
20461da177e4SLinus Torvalds extern void idle_task_exit(void);
20471da177e4SLinus Torvalds #else
20481da177e4SLinus Torvalds static inline void idle_task_exit(void) {}
20491da177e4SLinus Torvalds #endif
20501da177e4SLinus Torvalds 
20513451d024SFrederic Weisbecker #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
20521c20091eSFrederic Weisbecker extern void wake_up_nohz_cpu(int cpu);
205306d8308cSThomas Gleixner #else
20541c20091eSFrederic Weisbecker static inline void wake_up_nohz_cpu(int cpu) { }
205506d8308cSThomas Gleixner #endif
205606d8308cSThomas Gleixner 
2057ce831b38SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
2058ce831b38SFrederic Weisbecker extern bool sched_can_stop_tick(void);
2059265f22a9SFrederic Weisbecker extern u64 scheduler_tick_max_deferment(void);
2060ce831b38SFrederic Weisbecker #else
2061ce831b38SFrederic Weisbecker static inline bool sched_can_stop_tick(void) { return false; }
2062bf0f6f24SIngo Molnar #endif
2063bf0f6f24SIngo Molnar 
20645091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
20655091faa4SMike Galbraith extern void sched_autogroup_create_attach(struct task_struct *p);
20665091faa4SMike Galbraith extern void sched_autogroup_detach(struct task_struct *p);
20675091faa4SMike Galbraith extern void sched_autogroup_fork(struct signal_struct *sig);
20685091faa4SMike Galbraith extern void sched_autogroup_exit(struct signal_struct *sig);
20695091faa4SMike Galbraith #ifdef CONFIG_PROC_FS
20705091faa4SMike Galbraith extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
20712e5b5b3aSHiroshi Shimamoto extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
20725091faa4SMike Galbraith #endif
20735091faa4SMike Galbraith #else
20745091faa4SMike Galbraith static inline void sched_autogroup_create_attach(struct task_struct *p) { }
20755091faa4SMike Galbraith static inline void sched_autogroup_detach(struct task_struct *p) { }
20765091faa4SMike Galbraith static inline void sched_autogroup_fork(struct signal_struct *sig) { }
20775091faa4SMike Galbraith static inline void sched_autogroup_exit(struct signal_struct *sig) { }
20785091faa4SMike Galbraith #endif
20795091faa4SMike Galbraith 
2080d95f4122SMike Galbraith extern bool yield_to(struct task_struct *p, bool preempt);
208136c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
208236c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
208336c8b586SIngo Molnar extern int task_nice(const struct task_struct *p);
208436c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
208536c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
20861da177e4SLinus Torvalds extern int idle_cpu(int cpu);
2087fe7de49fSKOSAKI Motohiro extern int sched_setscheduler(struct task_struct *, int,
2088fe7de49fSKOSAKI Motohiro 			      const struct sched_param *);
2089961ccdddSRusty Russell extern int sched_setscheduler_nocheck(struct task_struct *, int,
2090fe7de49fSKOSAKI Motohiro 				      const struct sched_param *);
2091d50dde5aSDario Faggioli extern int sched_setattr(struct task_struct *,
2092d50dde5aSDario Faggioli 			 const struct sched_attr *);
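/*
 * A usage sketch, not from this header: make a kernel task SCHED_FIFO.
 * The priority value is an assumption; real callers pick policy-specific
 * values between 1 and MAX_RT_PRIO-1.
 *
 *	static int make_task_fifo(struct task_struct *p)
 *	{
 *		struct sched_param sp = { .sched_priority = 1 };
 *
 *		return sched_setscheduler(p, SCHED_FIFO, &sp);
 *	}
 */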
209336c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
2094c4f30608SPaul E. McKenney /**
2095c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
2096fa757281SRandy Dunlap  * @p: the task in question.
2097e69f6186SYacine Belkadi  *
2098e69f6186SYacine Belkadi  * Return: %true if @p is an idle task, %false otherwise.
2099c4f30608SPaul E. McKenney  */
21007061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
2101c4f30608SPaul E. McKenney {
2102c4f30608SPaul E. McKenney 	return p->pid == 0;
2103c4f30608SPaul E. McKenney }
210436c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
210536c8b586SIngo Molnar extern void set_curr_task(int cpu, struct task_struct *p);
21061da177e4SLinus Torvalds 
21071da177e4SLinus Torvalds void yield(void);
21081da177e4SLinus Torvalds 
21091da177e4SLinus Torvalds /*
21101da177e4SLinus Torvalds  * The default (Linux) execution domain.
21111da177e4SLinus Torvalds  */
21121da177e4SLinus Torvalds extern struct exec_domain	default_exec_domain;
21131da177e4SLinus Torvalds 
21141da177e4SLinus Torvalds union thread_union {
21151da177e4SLinus Torvalds 	struct thread_info thread_info;
21161da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
21171da177e4SLinus Torvalds };
21181da177e4SLinus Torvalds 
21191da177e4SLinus Torvalds #ifndef __HAVE_ARCH_KSTACK_END
21201da177e4SLinus Torvalds static inline int kstack_end(void *addr)
21211da177e4SLinus Torvalds {
21221da177e4SLinus Torvalds 	/* Reliable end of stack detection:
21231da177e4SLinus Torvalds 	 * Some APM BIOS versions misalign the stack.
21241da177e4SLinus Torvalds 	 */
21251da177e4SLinus Torvalds 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
21261da177e4SLinus Torvalds }
21271da177e4SLinus Torvalds #endif
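/*
 * A hedged sketch of the classic consumer of kstack_end(): a raw stack
 * scanner that prints anything that looks like a kernel text address.
 * The starting pointer is assumed to be supplied by arch code.
 *
 *	static void dump_raw_stack(unsigned long *sp)
 *	{
 *		while (!kstack_end(sp)) {
 *			unsigned long addr = *sp++;
 *
 *			if (__kernel_text_address(addr))
 *				print_ip_sym(addr);
 *		}
 *	}
 */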
21281da177e4SLinus Torvalds 
21291da177e4SLinus Torvalds extern union thread_union init_thread_union;
21301da177e4SLinus Torvalds extern struct task_struct init_task;
21311da177e4SLinus Torvalds 
21321da177e4SLinus Torvalds extern struct   mm_struct init_mm;
21331da177e4SLinus Torvalds 
2134198fe21bSPavel Emelyanov extern struct pid_namespace init_pid_ns;
2135198fe21bSPavel Emelyanov 
2136198fe21bSPavel Emelyanov /*
2137198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
2138198fe21bSPavel Emelyanov  *
2139198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
2140198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
2141228ebcbeSPavel Emelyanov  * find_task_by_vpid():
2142228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
2143198fe21bSPavel Emelyanov  *
2144e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
2145198fe21bSPavel Emelyanov  */
2146198fe21bSPavel Emelyanov 
2147228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
2148228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2149228ebcbeSPavel Emelyanov 		struct pid_namespace *ns);
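/*
 * Lookup sketch: these lookups are only valid under rcu_read_lock() (or
 * tasklist_lock); take a reference before using the task outside it.
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *	if (p) {
 *		... use p ...
 *		put_task_struct(p);
 *	}
 */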
2150198fe21bSPavel Emelyanov 
21511da177e4SLinus Torvalds /* per-UID process charging. */
21527b44ab97SEric W. Biederman extern struct user_struct * alloc_uid(kuid_t);
21531da177e4SLinus Torvalds static inline struct user_struct *get_uid(struct user_struct *u)
21541da177e4SLinus Torvalds {
21551da177e4SLinus Torvalds 	atomic_inc(&u->__count);
21561da177e4SLinus Torvalds 	return u;
21571da177e4SLinus Torvalds }
21581da177e4SLinus Torvalds extern void free_uid(struct user_struct *);
21591da177e4SLinus Torvalds 
21601da177e4SLinus Torvalds #include <asm/current.h>
21611da177e4SLinus Torvalds 
2162f0af911aSTorben Hohn extern void xtime_update(unsigned long ticks);
21631da177e4SLinus Torvalds 
2164b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2165b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
21663e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
21671da177e4SLinus Torvalds #ifdef CONFIG_SMP
21681da177e4SLinus Torvalds  extern void kick_process(struct task_struct *tsk);
21691da177e4SLinus Torvalds #else
21701da177e4SLinus Torvalds  static inline void kick_process(struct task_struct *tsk) { }
21711da177e4SLinus Torvalds #endif
2172aab03e05SDario Faggioli extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2173ad46c2c4SIngo Molnar extern void sched_dead(struct task_struct *p);
21741da177e4SLinus Torvalds 
21751da177e4SLinus Torvalds extern void proc_caches_init(void);
21761da177e4SLinus Torvalds extern void flush_signals(struct task_struct *);
21773bcac026SDavid Howells extern void __flush_signals(struct task_struct *);
217810ab825bSOleg Nesterov extern void ignore_signals(struct task_struct *);
21791da177e4SLinus Torvalds extern void flush_signal_handlers(struct task_struct *, int force_default);
21801da177e4SLinus Torvalds extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
21811da177e4SLinus Torvalds 
21821da177e4SLinus Torvalds static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
21831da177e4SLinus Torvalds {
21841da177e4SLinus Torvalds 	unsigned long flags;
21851da177e4SLinus Torvalds 	int ret;
21861da177e4SLinus Torvalds 
21871da177e4SLinus Torvalds 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
21881da177e4SLinus Torvalds 	ret = dequeue_signal(tsk, mask, info);
21891da177e4SLinus Torvalds 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
21901da177e4SLinus Torvalds 
21911da177e4SLinus Torvalds 	return ret;
21921da177e4SLinus Torvalds }
21931da177e4SLinus Torvalds 
21941da177e4SLinus Torvalds extern void block_all_signals(int (*notifier)(void *priv), void *priv,
21951da177e4SLinus Torvalds 			      sigset_t *mask);
21961da177e4SLinus Torvalds extern void unblock_all_signals(void);
21971da177e4SLinus Torvalds extern void release_task(struct task_struct * p);
21981da177e4SLinus Torvalds extern int send_sig_info(int, struct siginfo *, struct task_struct *);
21991da177e4SLinus Torvalds extern int force_sigsegv(int, struct task_struct *);
22001da177e4SLinus Torvalds extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2201c4b92fc1SEric W. Biederman extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2202c4b92fc1SEric W. Biederman extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2203d178bc3aSSerge Hallyn extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2204d178bc3aSSerge Hallyn 				const struct cred *, u32);
2205c4b92fc1SEric W. Biederman extern int kill_pgrp(struct pid *pid, int sig, int priv);
2206c4b92fc1SEric W. Biederman extern int kill_pid(struct pid *pid, int sig, int priv);
2207c3de4b38SMatthew Wilcox extern int kill_proc_info(int, struct siginfo *, pid_t);
220886773473SOleg Nesterov extern __must_check bool do_notify_parent(struct task_struct *, int);
2209a7f0765eSOleg Nesterov extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
22101da177e4SLinus Torvalds extern void force_sig(int, struct task_struct *);
22111da177e4SLinus Torvalds extern int send_sig(int, struct task_struct *, int);
221209faef11SOleg Nesterov extern int zap_other_threads(struct task_struct *p);
22131da177e4SLinus Torvalds extern struct sigqueue *sigqueue_alloc(void);
22141da177e4SLinus Torvalds extern void sigqueue_free(struct sigqueue *);
2215ac5c2153SOleg Nesterov extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
22169ac95f2fSOleg Nesterov extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
22171da177e4SLinus Torvalds 
221851a7b448SAl Viro static inline void restore_saved_sigmask(void)
221951a7b448SAl Viro {
222051a7b448SAl Viro 	if (test_and_clear_restore_sigmask())
222177097ae5SAl Viro 		__set_current_blocked(&current->saved_sigmask);
222251a7b448SAl Viro }
222351a7b448SAl Viro 
2224b7f9a11aSAl Viro static inline sigset_t *sigmask_to_save(void)
2225b7f9a11aSAl Viro {
2226b7f9a11aSAl Viro 	sigset_t *res = &current->blocked;
2227b7f9a11aSAl Viro 	if (unlikely(test_restore_sigmask()))
2228b7f9a11aSAl Viro 		res = &current->saved_sigmask;
2229b7f9a11aSAl Viro 	return res;
2230b7f9a11aSAl Viro }
2231b7f9a11aSAl Viro 
22329ec52099SCedric Le Goater static inline int kill_cad_pid(int sig, int priv)
22339ec52099SCedric Le Goater {
22349ec52099SCedric Le Goater 	return kill_pid(cad_pid, sig, priv);
22359ec52099SCedric Le Goater }
22369ec52099SCedric Le Goater 
22371da177e4SLinus Torvalds /* These can be the second arg to send_sig_info/send_group_sig_info.  */
22381da177e4SLinus Torvalds #define SEND_SIG_NOINFO ((struct siginfo *) 0)
22391da177e4SLinus Torvalds #define SEND_SIG_PRIV	((struct siginfo *) 1)
22401da177e4SLinus Torvalds #define SEND_SIG_FORCED	((struct siginfo *) 2)
22411da177e4SLinus Torvalds 
22422a855dd0SSebastian Andrzej Siewior /*
22432a855dd0SSebastian Andrzej Siewior  * True if we are on the alternate signal stack.
22442a855dd0SSebastian Andrzej Siewior  */
22451da177e4SLinus Torvalds static inline int on_sig_stack(unsigned long sp)
22461da177e4SLinus Torvalds {
22472a855dd0SSebastian Andrzej Siewior #ifdef CONFIG_STACK_GROWSUP
22482a855dd0SSebastian Andrzej Siewior 	return sp >= current->sas_ss_sp &&
22492a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp < current->sas_ss_size;
22502a855dd0SSebastian Andrzej Siewior #else
22512a855dd0SSebastian Andrzej Siewior 	return sp > current->sas_ss_sp &&
22522a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp <= current->sas_ss_size;
22532a855dd0SSebastian Andrzej Siewior #endif
22541da177e4SLinus Torvalds }
22551da177e4SLinus Torvalds 
22561da177e4SLinus Torvalds static inline int sas_ss_flags(unsigned long sp)
22571da177e4SLinus Torvalds {
22581da177e4SLinus Torvalds 	return (current->sas_ss_size == 0 ? SS_DISABLE
22591da177e4SLinus Torvalds 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
22601da177e4SLinus Torvalds }
22611da177e4SLinus Torvalds 
22625a1b98d3SAl Viro static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
22635a1b98d3SAl Viro {
22645a1b98d3SAl Viro 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
22655a1b98d3SAl Viro #ifdef CONFIG_STACK_GROWSUP
22665a1b98d3SAl Viro 		return current->sas_ss_sp;
22675a1b98d3SAl Viro #else
22685a1b98d3SAl Viro 		return current->sas_ss_sp + current->sas_ss_size;
22695a1b98d3SAl Viro #endif
22705a1b98d3SAl Viro 	return sp;
22715a1b98d3SAl Viro }
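/*
 * A sketch of how arch signal delivery typically combines these helpers:
 * start from sigsp() (which honours SA_ONSTACK), then carve out and align
 * the frame. `regs_sp` and `frame_size` are placeholders for arch state,
 * and `ksig` is the caller's struct ksignal.
 *
 *	unsigned long sp = sigsp(regs_sp, ksig);
 *	void __user *frame = (void __user *)((sp - frame_size) & ~15UL);
 */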
22725a1b98d3SAl Viro 
22731da177e4SLinus Torvalds /*
22741da177e4SLinus Torvalds  * Routines for handling mm_structs
22751da177e4SLinus Torvalds  */
22761da177e4SLinus Torvalds extern struct mm_struct * mm_alloc(void);
22771da177e4SLinus Torvalds 
22781da177e4SLinus Torvalds /* mmdrop drops the mm and the page tables */
2279b3c97528SHarvey Harrison extern void __mmdrop(struct mm_struct *);
22801da177e4SLinus Torvalds static inline void mmdrop(struct mm_struct * mm)
22811da177e4SLinus Torvalds {
22826fb43d7bSIngo Molnar 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
22831da177e4SLinus Torvalds 		__mmdrop(mm);
22841da177e4SLinus Torvalds }
22851da177e4SLinus Torvalds 
22861da177e4SLinus Torvalds /* mmput gets rid of the mappings and all of the user-space state */
22871da177e4SLinus Torvalds extern void mmput(struct mm_struct *);
22881da177e4SLinus Torvalds /* Grab a reference to a task's mm, if it is not already going away */
22891da177e4SLinus Torvalds extern struct mm_struct *get_task_mm(struct task_struct *task);
22908cdb878dSChristopher Yeoh /*
22918cdb878dSChristopher Yeoh  * Grab a reference to a task's mm, if it is not already going away
22928cdb878dSChristopher Yeoh  * and ptrace_may_access() with the given mode succeeds.
22948cdb878dSChristopher Yeoh  */
22958cdb878dSChristopher Yeoh extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
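/*
 * Reference sketch for the mm helpers above: get_task_mm() pins the whole
 * address space (mm_users) and must be balanced by mmput(); get_mm_rss()
 * is used purely as an example of touching the mm while it is pinned.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		pr_info("rss: %lu pages\n", get_mm_rss(mm));
 *		mmput(mm);
 *	}
 */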
22961da177e4SLinus Torvalds /* Remove the current task's stale references to the old mm_struct */
22971da177e4SLinus Torvalds extern void mm_release(struct task_struct *, struct mm_struct *);
2298402b0862SCarsten Otte /* Allocate a new mm structure and copy contents from tsk->mm */
2299402b0862SCarsten Otte extern struct mm_struct *dup_mm(struct task_struct *tsk);
23001da177e4SLinus Torvalds 
23016f2c55b8SAlexey Dobriyan extern int copy_thread(unsigned long, unsigned long, unsigned long,
2302afa86fc4SAl Viro 			struct task_struct *);
23031da177e4SLinus Torvalds extern void flush_thread(void);
23041da177e4SLinus Torvalds extern void exit_thread(void);
23051da177e4SLinus Torvalds 
23061da177e4SLinus Torvalds extern void exit_files(struct task_struct *);
2307a7e5328aSOleg Nesterov extern void __cleanup_sighand(struct sighand_struct *);
2308cbaffba1SOleg Nesterov 
23091da177e4SLinus Torvalds extern void exit_itimers(struct signal_struct *);
2310cbaffba1SOleg Nesterov extern void flush_itimer_signals(void);
23111da177e4SLinus Torvalds 
23129402c95fSJoe Perches extern void do_group_exit(int);
23131da177e4SLinus Torvalds 
23141da177e4SLinus Torvalds extern int allow_signal(int);
23151da177e4SLinus Torvalds extern int disallow_signal(int);
23161da177e4SLinus Torvalds 
2317d7627467SDavid Howells extern int do_execve(const char *,
2318d7627467SDavid Howells 		     const char __user * const __user *,
2319da3d4c5fSAl Viro 		     const char __user * const __user *);
2320e80d6661SAl Viro extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
232136c8b586SIngo Molnar struct task_struct *fork_idle(int);
23222aa3a7f8SAl Viro extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
23231da177e4SLinus Torvalds 
23241da177e4SLinus Torvalds extern void set_task_comm(struct task_struct *tsk, char *from);
232559714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
23261da177e4SLinus Torvalds 
23271da177e4SLinus Torvalds #ifdef CONFIG_SMP
2328317f3941SPeter Zijlstra void scheduler_ipi(void);
232985ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
23301da177e4SLinus Torvalds #else
2331184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
233285ba2d86SRoland McGrath static inline unsigned long wait_task_inactive(struct task_struct *p,
233385ba2d86SRoland McGrath 					       long match_state)
233485ba2d86SRoland McGrath {
233585ba2d86SRoland McGrath 	return 1;
233685ba2d86SRoland McGrath }
23371da177e4SLinus Torvalds #endif
23381da177e4SLinus Torvalds 
233905725f7eSJiri Pirko #define next_task(p) \
234005725f7eSJiri Pirko 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
23411da177e4SLinus Torvalds 
23421da177e4SLinus Torvalds #define for_each_process(p) \
23431da177e4SLinus Torvalds 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
23441da177e4SLinus Torvalds 
23455bb459bbSOleg Nesterov extern bool current_is_single_threaded(void);
2346d84f4f99SDavid Howells 
23471da177e4SLinus Torvalds /*
23481da177e4SLinus Torvalds  * Careful: do_each_thread/while_each_thread is a double loop so
23491da177e4SLinus Torvalds  *          'break' will not work as expected - use goto instead.
23501da177e4SLinus Torvalds  */
23511da177e4SLinus Torvalds #define do_each_thread(g, t) \
23521da177e4SLinus Torvalds 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
23531da177e4SLinus Torvalds 
23541da177e4SLinus Torvalds #define while_each_thread(g, t) \
23551da177e4SLinus Torvalds 	while ((t = next_thread(t)) != g)
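/*
 * Iteration sketch: walking every thread of every process. The double
 * loop must run under read_lock(&tasklist_lock), and as noted below,
 * 'break' will not exit it; use goto.
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (t->mm)
 *			pr_info("%s[%d]\n", t->comm, t->pid);
 *	} while_each_thread(g, t);
 *	read_unlock(&tasklist_lock);
 */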
23561da177e4SLinus Torvalds 
23577e49827cSOleg Nesterov static inline int get_nr_threads(struct task_struct *tsk)
23587e49827cSOleg Nesterov {
2359b3ac022cSOleg Nesterov 	return tsk->signal->nr_threads;
23607e49827cSOleg Nesterov }
23617e49827cSOleg Nesterov 
2362087806b1SOleg Nesterov static inline bool thread_group_leader(struct task_struct *p)
2363087806b1SOleg Nesterov {
2364087806b1SOleg Nesterov 	return p->exit_signal >= 0;
2365087806b1SOleg Nesterov }
23661da177e4SLinus Torvalds 
23670804ef4bSEric W. Biederman /* Due to the insanities of de_thread it is possible for a process
23680804ef4bSEric W. Biederman  * to have the pid of the thread group leader without actually being
23690804ef4bSEric W. Biederman  * the thread group leader.  For iteration through the pids in proc
23700804ef4bSEric W. Biederman  * all we care about is that we have a task with the appropriate
23710804ef4bSEric W. Biederman  * pid; we don't actually care if we have the right task.
23720804ef4bSEric W. Biederman  */
2373e1403b8eSOleg Nesterov static inline bool has_group_leader_pid(struct task_struct *p)
23740804ef4bSEric W. Biederman {
2375e1403b8eSOleg Nesterov 	return task_pid(p) == p->signal->leader_pid;
23760804ef4bSEric W. Biederman }
23770804ef4bSEric W. Biederman 
2378bac0abd6SPavel Emelyanov static inline
2379e1403b8eSOleg Nesterov bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2380bac0abd6SPavel Emelyanov {
2381e1403b8eSOleg Nesterov 	return p1->signal == p2->signal;
2382bac0abd6SPavel Emelyanov }
2383bac0abd6SPavel Emelyanov 
238436c8b586SIngo Molnar static inline struct task_struct *next_thread(const struct task_struct *p)
238547e65328SOleg Nesterov {
238605725f7eSJiri Pirko 	return list_entry_rcu(p->thread_group.next,
238736c8b586SIngo Molnar 			      struct task_struct, thread_group);
238847e65328SOleg Nesterov }
238947e65328SOleg Nesterov 
2390e868171aSAlexey Dobriyan static inline int thread_group_empty(struct task_struct *p)
23911da177e4SLinus Torvalds {
239247e65328SOleg Nesterov 	return list_empty(&p->thread_group);
23931da177e4SLinus Torvalds }
23941da177e4SLinus Torvalds 
23951da177e4SLinus Torvalds #define delay_group_leader(p) \
23961da177e4SLinus Torvalds 		(thread_group_leader(p) && !thread_group_empty(p))
23971da177e4SLinus Torvalds 
23981da177e4SLinus Torvalds /*
2399260ea101SEric W. Biederman  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, and keyring
240022e2c507SJens Axboe  * subscriptions, and synchronises with wait4().  Also used in procfs.
2401ddbcc7e8SPaul Menage  * Also pins the final release of task.io_context, and protects ->cpuset,
2402d68b46feSOleg Nesterov  * ->cgroup.subsys[] and ->vfork_done.
24031da177e4SLinus Torvalds  *
24041da177e4SLinus Torvalds  * Nests both inside and outside of read_lock(&tasklist_lock).
24051da177e4SLinus Torvalds  * It must not be nested with write_lock_irq(&tasklist_lock),
24061da177e4SLinus Torvalds  * neither inside nor outside.
24071da177e4SLinus Torvalds  */
24081da177e4SLinus Torvalds static inline void task_lock(struct task_struct *p)
24091da177e4SLinus Torvalds {
24101da177e4SLinus Torvalds 	spin_lock(&p->alloc_lock);
24111da177e4SLinus Torvalds }
24121da177e4SLinus Torvalds 
24131da177e4SLinus Torvalds static inline void task_unlock(struct task_struct *p)
24141da177e4SLinus Torvalds {
24151da177e4SLinus Torvalds 	spin_unlock(&p->alloc_lock);
24161da177e4SLinus Torvalds }
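/*
 * A minimal task_lock() sketch: ->comm is one of the fields it protects,
 * and this is essentially what get_task_comm() does internally.
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strncpy(comm, p->comm, sizeof(comm));
 *	task_unlock(p);
 */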
24171da177e4SLinus Torvalds 
2418b8ed374eSNamhyung Kim extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2419f63ee72eSOleg Nesterov 							unsigned long *flags);
2420f63ee72eSOleg Nesterov 
24219388dc30SAnton Vorontsov static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
24229388dc30SAnton Vorontsov 						       unsigned long *flags)
24239388dc30SAnton Vorontsov {
24249388dc30SAnton Vorontsov 	struct sighand_struct *ret;
24259388dc30SAnton Vorontsov 
24269388dc30SAnton Vorontsov 	ret = __lock_task_sighand(tsk, flags);
24279388dc30SAnton Vorontsov 	(void)__cond_lock(&tsk->sighand->siglock, ret);
24289388dc30SAnton Vorontsov 	return ret;
24299388dc30SAnton Vorontsov }
2430b8ed374eSNamhyung Kim 
2431f63ee72eSOleg Nesterov static inline void unlock_task_sighand(struct task_struct *tsk,
2432f63ee72eSOleg Nesterov 						unsigned long *flags)
2433f63ee72eSOleg Nesterov {
2434f63ee72eSOleg Nesterov 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2435f63ee72eSOleg Nesterov }
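/*
 * Locking sketch: lock_task_sighand() can fail if the task is being
 * released, so the result must be checked before touching ->signal.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		pr_info("threads: %d\n", p->signal->nr_threads);
 *		unlock_task_sighand(p, &flags);
 *	}
 */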
2436f63ee72eSOleg Nesterov 
24374714d1d3SBen Blum #ifdef CONFIG_CGROUPS
2438257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk)
24394714d1d3SBen Blum {
2440257058aeSTejun Heo 	down_read(&tsk->signal->group_rwsem);
24414714d1d3SBen Blum }
2442257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk)
24434714d1d3SBen Blum {
2444257058aeSTejun Heo 	up_read(&tsk->signal->group_rwsem);
24454714d1d3SBen Blum }
244677e4ef99STejun Heo 
244777e4ef99STejun Heo /**
244877e4ef99STejun Heo  * threadgroup_lock - lock threadgroup
244977e4ef99STejun Heo  * @tsk: member task of the threadgroup to lock
245077e4ef99STejun Heo  *
245177e4ef99STejun Heo  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
245277e4ef99STejun Heo  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2453e56fb287SOleg Nesterov  * change ->group_leader/pid.  This is useful for cases where the threadgroup
2454e56fb287SOleg Nesterov  * needs to stay stable across blockable operations.
245577e4ef99STejun Heo  *
245677e4ef99STejun Heo  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
245777e4ef99STejun Heo  * synchronization.  While held, no new task will be added to threadgroup
245877e4ef99STejun Heo  * and no existing live task will have its PF_EXITING set.
245977e4ef99STejun Heo  *
2460e56fb287SOleg Nesterov  * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2461e56fb287SOleg Nesterov  * sub-thread becomes a new leader.
246277e4ef99STejun Heo  */
2463257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk)
24644714d1d3SBen Blum {
2465257058aeSTejun Heo 	down_write(&tsk->signal->group_rwsem);
24664714d1d3SBen Blum }
246777e4ef99STejun Heo 
246877e4ef99STejun Heo /**
246977e4ef99STejun Heo  * threadgroup_unlock - unlock threadgroup
247077e4ef99STejun Heo  * @tsk: member task of the threadgroup to unlock
247177e4ef99STejun Heo  *
247277e4ef99STejun Heo  * Reverse threadgroup_lock().
247377e4ef99STejun Heo  */
2474257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk)
24754714d1d3SBen Blum {
2476257058aeSTejun Heo 	up_write(&tsk->signal->group_rwsem);
24774714d1d3SBen Blum }
24784714d1d3SBen Blum #else
2479257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2480257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk) {}
2481257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk) {}
2482257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk) {}
24834714d1d3SBen Blum #endif
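/*
 * A hedged sketch of threadgroup_lock() use: with the lock held, no
 * thread can enter or exit the group, so a walk with next_thread()
 * sees a stable set.
 *
 *	struct task_struct *t = tsk;
 *
 *	threadgroup_lock(tsk);
 *	do {
 *		pr_info("tid %d\n", t->pid);
 *	} while ((t = next_thread(t)) != tsk);
 *	threadgroup_unlock(tsk);
 */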
24844714d1d3SBen Blum 
2485f037360fSAl Viro #ifndef __HAVE_THREAD_FUNCTIONS
2486f037360fSAl Viro 
2487f7e4217bSRoman Zippel #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2488f7e4217bSRoman Zippel #define task_stack_page(task)	((task)->stack)
2489a1261f54SAl Viro 
249010ebffdeSAl Viro static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
249110ebffdeSAl Viro {
249210ebffdeSAl Viro 	*task_thread_info(p) = *task_thread_info(org);
249310ebffdeSAl Viro 	task_thread_info(p)->task = p;
249410ebffdeSAl Viro }
249510ebffdeSAl Viro 
249610ebffdeSAl Viro static inline unsigned long *end_of_stack(struct task_struct *p)
249710ebffdeSAl Viro {
2498f7e4217bSRoman Zippel 	return (unsigned long *)(task_thread_info(p) + 1);
249910ebffdeSAl Viro }
250010ebffdeSAl Viro 
2501f037360fSAl Viro #endif
2502f037360fSAl Viro 
25038b05c7e6SFUJITA Tomonori static inline int object_is_on_stack(void *obj)
25048b05c7e6SFUJITA Tomonori {
25058b05c7e6SFUJITA Tomonori 	void *stack = task_stack_page(current);
25068b05c7e6SFUJITA Tomonori 
25078b05c7e6SFUJITA Tomonori 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
25088b05c7e6SFUJITA Tomonori }
25098b05c7e6SFUJITA Tomonori 
25108c9843e5SBenjamin Herrenschmidt extern void thread_info_cache_init(void);
25118c9843e5SBenjamin Herrenschmidt 
25127c9f8861SEric Sandeen #ifdef CONFIG_DEBUG_STACK_USAGE
25137c9f8861SEric Sandeen static inline unsigned long stack_not_used(struct task_struct *p)
25147c9f8861SEric Sandeen {
25157c9f8861SEric Sandeen 	unsigned long *n = end_of_stack(p);
25167c9f8861SEric Sandeen 
25177c9f8861SEric Sandeen 	do { 	/* Skip over canary */
25187c9f8861SEric Sandeen 		n++;
25197c9f8861SEric Sandeen 	} while (!*n);
25207c9f8861SEric Sandeen 
25217c9f8861SEric Sandeen 	return (unsigned long)n - (unsigned long)end_of_stack(p);
25227c9f8861SEric Sandeen }
25237c9f8861SEric Sandeen #endif
25247c9f8861SEric Sandeen 
25251da177e4SLinus Torvalds /* Set thread flags in another task's structure;
25261da177e4SLinus Torvalds  * see asm/thread_info.h for the available TIF_xxxx flags.
25271da177e4SLinus Torvalds  */
25281da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
25291da177e4SLinus Torvalds {
2530a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
25311da177e4SLinus Torvalds }
25321da177e4SLinus Torvalds 
25331da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25341da177e4SLinus Torvalds {
2535a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
25361da177e4SLinus Torvalds }
25371da177e4SLinus Torvalds 
25381da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
25391da177e4SLinus Torvalds {
2540a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
25411da177e4SLinus Torvalds }
25421da177e4SLinus Torvalds 
25431da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25441da177e4SLinus Torvalds {
2545a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
25461da177e4SLinus Torvalds }
25471da177e4SLinus Torvalds 
25481da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
25491da177e4SLinus Torvalds {
2550a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
25511da177e4SLinus Torvalds }
25521da177e4SLinus Torvalds 
25531da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
25541da177e4SLinus Torvalds {
25551da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25561da177e4SLinus Torvalds }
25571da177e4SLinus Torvalds 
25581da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
25591da177e4SLinus Torvalds {
25601da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25611da177e4SLinus Torvalds }
25621da177e4SLinus Torvalds 
25638ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
25648ae121acSGregory Haskins {
25658ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
25668ae121acSGregory Haskins }
25678ae121acSGregory Haskins 
2568690cc3ffSEric W. Biederman static inline int restart_syscall(void)
2569690cc3ffSEric W. Biederman {
2570690cc3ffSEric W. Biederman 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2571690cc3ffSEric W. Biederman 	return -ERESTARTNOINTR;
2572690cc3ffSEric W. Biederman }
2573690cc3ffSEric W. Biederman 
25741da177e4SLinus Torvalds static inline int signal_pending(struct task_struct *p)
25751da177e4SLinus Torvalds {
25761da177e4SLinus Torvalds 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
25771da177e4SLinus Torvalds }
25781da177e4SLinus Torvalds 
2579d9588725SRoland McGrath static inline int __fatal_signal_pending(struct task_struct *p)
2580d9588725SRoland McGrath {
2581d9588725SRoland McGrath 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2582d9588725SRoland McGrath }
2583f776d12dSMatthew Wilcox 
2584f776d12dSMatthew Wilcox static inline int fatal_signal_pending(struct task_struct *p)
2585f776d12dSMatthew Wilcox {
2586f776d12dSMatthew Wilcox 	return signal_pending(p) && __fatal_signal_pending(p);
2587f776d12dSMatthew Wilcox }
2588f776d12dSMatthew Wilcox 
258916882c1eSOleg Nesterov static inline int signal_pending_state(long state, struct task_struct *p)
259016882c1eSOleg Nesterov {
259116882c1eSOleg Nesterov 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
259216882c1eSOleg Nesterov 		return 0;
259316882c1eSOleg Nesterov 	if (!signal_pending(p))
259416882c1eSOleg Nesterov 		return 0;
259516882c1eSOleg Nesterov 
259616882c1eSOleg Nesterov 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
259716882c1eSOleg Nesterov }
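/*
 * The canonical consumer of these predicates is an interruptible wait
 * loop; `condition` is a placeholder for the caller's wakeup condition.
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */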
259816882c1eSOleg Nesterov 
25991da177e4SLinus Torvalds /*
26001da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
26011da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
26021da177e4SLinus Torvalds  * value indicates whether a reschedule actually happened.
26031da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling;
26041da177e4SLinus Torvalds  * cond_resched_softirq() will enable bottom halves before scheduling.
26051da177e4SLinus Torvalds  */
2606c3921ab7SLinus Torvalds extern int _cond_resched(void);
26076f80bd98SFrederic Weisbecker 
2608613afbf8SFrederic Weisbecker #define cond_resched() ({			\
2609613afbf8SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, 0);	\
2610613afbf8SFrederic Weisbecker 	_cond_resched();			\
2611613afbf8SFrederic Weisbecker })
26126f80bd98SFrederic Weisbecker 
2613613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
2614613afbf8SFrederic Weisbecker 
2615bdd4e85dSFrederic Weisbecker #ifdef CONFIG_PREEMPT_COUNT
2616716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
261702b67cc3SHerbert Xu #else
2618716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	0
261902b67cc3SHerbert Xu #endif
2620716a4234SFrederic Weisbecker 
2621613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
2622716a4234SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2623613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
2624613afbf8SFrederic Weisbecker })
2625613afbf8SFrederic Weisbecker 
2626613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
2627613afbf8SFrederic Weisbecker 
2628613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
262975e1056fSVenkatesh Pallipadi 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2630613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
2631613afbf8SFrederic Weisbecker })
26321da177e4SLinus Torvalds 
2633f6f3c437SSimon Horman static inline void cond_resched_rcu(void)
2634f6f3c437SSimon Horman {
2635f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2636f6f3c437SSimon Horman 	rcu_read_unlock();
2637f6f3c437SSimon Horman 	cond_resched();
2638f6f3c437SSimon Horman 	rcu_read_lock();
2639f6f3c437SSimon Horman #endif
2640f6f3c437SSimon Horman }
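/*
 * Typical use, sketched with a placeholder process_item(): sprinkle
 * cond_resched() into long kernel loops so !CONFIG_PREEMPT kernels do
 * not hog the CPU; cond_resched_lock() does the same while juggling a
 * held spinlock.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */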
2641f6f3c437SSimon Horman 
26421da177e4SLinus Torvalds /*
26431da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
264395c354feSNick Piggin  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
264495c354feSNick Piggin  * but reflects a general need for low latency.)
26461da177e4SLinus Torvalds  */
264795c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
26481da177e4SLinus Torvalds {
264995c354feSNick Piggin #ifdef CONFIG_PREEMPT
265095c354feSNick Piggin 	return spin_is_contended(lock);
265195c354feSNick Piggin #else
26521da177e4SLinus Torvalds 	return 0;
265395c354feSNick Piggin #endif
26541da177e4SLinus Torvalds }
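/*
 * Lock-break sketch (have_work() and do_unit() are placeholders):
 * briefly drop a contended lock so a waiter or preemption can get in.
 * This is the hand-rolled form of what cond_resched_lock() packages up.
 *
 *	spin_lock(&lock);
 *	while (have_work()) {
 *		do_unit();
 *		if (spin_needbreak(&lock)) {
 *			spin_unlock(&lock);
 *			cpu_relax();
 *			spin_lock(&lock);
 *		}
 *	}
 *	spin_unlock(&lock);
 */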
26551da177e4SLinus Torvalds 
26567bb44adeSRoland McGrath /*
2657ee761f62SThomas Gleixner  * Idle-thread-specific functions to determine the need_resched
2658ee761f62SThomas Gleixner  * polling state. We have two versions: one based on TS_POLLING in
2659ee761f62SThomas Gleixner  * thread_info.status and one based on TIF_POLLING_NRFLAG in
2660ee761f62SThomas Gleixner  * thread_info.flags.
2661ee761f62SThomas Gleixner  */
2662ee761f62SThomas Gleixner #ifdef TS_POLLING
2663ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p)
2664ee761f62SThomas Gleixner {
2665ee761f62SThomas Gleixner 	return task_thread_info(p)->status & TS_POLLING;
2666ee761f62SThomas Gleixner }
2667ea811747SPeter Zijlstra static inline void __current_set_polling(void)
26683a98f871SThomas Gleixner {
26693a98f871SThomas Gleixner 	current_thread_info()->status |= TS_POLLING;
26703a98f871SThomas Gleixner }
26713a98f871SThomas Gleixner 
2672ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2673ea811747SPeter Zijlstra {
2674ea811747SPeter Zijlstra 	__current_set_polling();
2675ea811747SPeter Zijlstra 
2676ea811747SPeter Zijlstra 	/*
2677ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2678ea811747SPeter Zijlstra 	 * paired with resched_task()
2679ea811747SPeter Zijlstra 	 */
2680ea811747SPeter Zijlstra 	smp_mb();
2681ea811747SPeter Zijlstra 
2682ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2683ea811747SPeter Zijlstra }
2684ea811747SPeter Zijlstra 
2685ea811747SPeter Zijlstra static inline void __current_clr_polling(void)
26863a98f871SThomas Gleixner {
26873a98f871SThomas Gleixner 	current_thread_info()->status &= ~TS_POLLING;
2688ea811747SPeter Zijlstra }
2689ea811747SPeter Zijlstra 
2690ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2691ea811747SPeter Zijlstra {
2692ea811747SPeter Zijlstra 	__current_clr_polling();
2693ea811747SPeter Zijlstra 
2694ea811747SPeter Zijlstra 	/*
2695ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2696ea811747SPeter Zijlstra 	 * paired with resched_task()
2697ea811747SPeter Zijlstra 	 */
2698ea811747SPeter Zijlstra 	smp_mb();
2699ea811747SPeter Zijlstra 
2700ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
27013a98f871SThomas Gleixner }
2702ee761f62SThomas Gleixner #elif defined(TIF_POLLING_NRFLAG)
2703ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p)
2704ee761f62SThomas Gleixner {
2705ee761f62SThomas Gleixner 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2706ee761f62SThomas Gleixner }
2707ea811747SPeter Zijlstra 
2708ea811747SPeter Zijlstra static inline void __current_set_polling(void)
27093a98f871SThomas Gleixner {
27103a98f871SThomas Gleixner 	set_thread_flag(TIF_POLLING_NRFLAG);
27113a98f871SThomas Gleixner }
27123a98f871SThomas Gleixner 
2713ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2714ea811747SPeter Zijlstra {
2715ea811747SPeter Zijlstra 	__current_set_polling();
2716ea811747SPeter Zijlstra 
2717ea811747SPeter Zijlstra 	/*
2718ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2719ea811747SPeter Zijlstra 	 * paired with resched_task()
2720ea811747SPeter Zijlstra 	 *
2721ea811747SPeter Zijlstra 	 * XXX: assumes set/clear bit are identical barrier wise.
2722ea811747SPeter Zijlstra 	 */
2723ea811747SPeter Zijlstra 	smp_mb__after_clear_bit();
2724ea811747SPeter Zijlstra 
2725ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2726ea811747SPeter Zijlstra }
2727ea811747SPeter Zijlstra 
2728ea811747SPeter Zijlstra static inline void __current_clr_polling(void)
27293a98f871SThomas Gleixner {
27303a98f871SThomas Gleixner 	clear_thread_flag(TIF_POLLING_NRFLAG);
27313a98f871SThomas Gleixner }
2732ea811747SPeter Zijlstra 
2733ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2734ea811747SPeter Zijlstra {
2735ea811747SPeter Zijlstra 	__current_clr_polling();
2736ea811747SPeter Zijlstra 
2737ea811747SPeter Zijlstra 	/*
2738ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2739ea811747SPeter Zijlstra 	 * paired with resched_task()
2740ea811747SPeter Zijlstra 	 */
2741ea811747SPeter Zijlstra 	smp_mb__after_clear_bit();
2742ea811747SPeter Zijlstra 
2743ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2744ea811747SPeter Zijlstra }
2745ea811747SPeter Zijlstra 
2746ee761f62SThomas Gleixner #else
2747ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2748ea811747SPeter Zijlstra static inline void __current_set_polling(void) { }
2749ea811747SPeter Zijlstra static inline void __current_clr_polling(void) { }
2750ea811747SPeter Zijlstra 
2751ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2752ea811747SPeter Zijlstra {
2753ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2754ea811747SPeter Zijlstra }
2755ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2756ea811747SPeter Zijlstra {
2757ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2758ea811747SPeter Zijlstra }
2759ee761f62SThomas Gleixner #endif
2760ee761f62SThomas Gleixner 
27618cb75e0cSPeter Zijlstra static inline void current_clr_polling(void)
27628cb75e0cSPeter Zijlstra {
27638cb75e0cSPeter Zijlstra 	__current_clr_polling();
27648cb75e0cSPeter Zijlstra 
27658cb75e0cSPeter Zijlstra 	/*
27668cb75e0cSPeter Zijlstra 	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
27678cb75e0cSPeter Zijlstra 	 * Once the bit is cleared, we'll get IPIs with every new
27688cb75e0cSPeter Zijlstra 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
27698cb75e0cSPeter Zijlstra 	 * fold.
27708cb75e0cSPeter Zijlstra 	 */
27718cb75e0cSPeter Zijlstra 	smp_mb(); /* paired with resched_task() */
27728cb75e0cSPeter Zijlstra 
27738cb75e0cSPeter Zijlstra 	preempt_fold_need_resched();
27748cb75e0cSPeter Zijlstra }
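/*
 * An idle-loop sketch: the polling helpers tell remote CPUs that this
 * CPU watches TIF_NEED_RESCHED itself, so resched_task() can skip the
 * IPI. mwait_idle_like() is a placeholder for an mwait-style wait that
 * monitors the flag word.
 *
 *	if (!current_set_polling_and_test())
 *		mwait_idle_like();
 *	current_clr_polling();
 */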
27758cb75e0cSPeter Zijlstra 
277675f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
277775f93fedSPeter Zijlstra {
277875f93fedSPeter Zijlstra 	return unlikely(tif_need_resched());
277975f93fedSPeter Zijlstra }
278075f93fedSPeter Zijlstra 
2781ee761f62SThomas Gleixner /*
2782f06febc9SFrank Mayhar  * Thread group CPU time accounting.
2783f06febc9SFrank Mayhar  */
27844cd4c1b4SPeter Zijlstra void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
27854da94d49SPeter Zijlstra void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2786f06febc9SFrank Mayhar 
2787f06febc9SFrank Mayhar static inline void thread_group_cputime_init(struct signal_struct *sig)
2788f06febc9SFrank Mayhar {
2789ee30a7b2SThomas Gleixner 	raw_spin_lock_init(&sig->cputimer.lock);
2790f06febc9SFrank Mayhar }
2791f06febc9SFrank Mayhar 
2792f06febc9SFrank Mayhar /*
27937bb44adeSRoland McGrath  * Reevaluate whether the task has signals pending delivery.
27947bb44adeSRoland McGrath  * Wake the task if so.
27957bb44adeSRoland McGrath  * This is required every time the blocked sigset_t changes.
27967bb44adeSRoland McGrath  * callers must hold sighand->siglock.
27977bb44adeSRoland McGrath  * Callers must hold sighand->siglock.
27987bb44adeSRoland McGrath extern void recalc_sigpending_and_wake(struct task_struct *t);
27991da177e4SLinus Torvalds extern void recalc_sigpending(void);
28001da177e4SLinus Torvalds 
2801910ffdb1SOleg Nesterov extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2802910ffdb1SOleg Nesterov 
2803910ffdb1SOleg Nesterov static inline void signal_wake_up(struct task_struct *t, bool resume)
2804910ffdb1SOleg Nesterov {
2805910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2806910ffdb1SOleg Nesterov }
2807910ffdb1SOleg Nesterov static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2808910ffdb1SOleg Nesterov {
2809910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2810910ffdb1SOleg Nesterov }
28111da177e4SLinus Torvalds 
28121da177e4SLinus Torvalds /*
28131da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
28141da177e4SLinus Torvalds  */
28151da177e4SLinus Torvalds #ifdef CONFIG_SMP
28161da177e4SLinus Torvalds 
28171da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
28181da177e4SLinus Torvalds {
2819a1261f54SAl Viro 	return task_thread_info(p)->cpu;
28201da177e4SLinus Torvalds }
28211da177e4SLinus Torvalds 
2822b32e86b4SIngo Molnar static inline int task_node(const struct task_struct *p)
2823b32e86b4SIngo Molnar {
2824b32e86b4SIngo Molnar 	return cpu_to_node(task_cpu(p));
2825b32e86b4SIngo Molnar }
2826b32e86b4SIngo Molnar 
2827c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
28281da177e4SLinus Torvalds 
28291da177e4SLinus Torvalds #else
28301da177e4SLinus Torvalds 
28311da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
28321da177e4SLinus Torvalds {
28331da177e4SLinus Torvalds 	return 0;
28341da177e4SLinus Torvalds }
28351da177e4SLinus Torvalds 
28361da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
28371da177e4SLinus Torvalds {
28381da177e4SLinus Torvalds }
28391da177e4SLinus Torvalds 
28401da177e4SLinus Torvalds #endif /* CONFIG_SMP */
28411da177e4SLinus Torvalds 
284296f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
284396f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
28445c45bf27SSiddha, Suresh B 
28457c941438SDhaval Giani #ifdef CONFIG_CGROUP_SCHED
284607e06b01SYong Zhang extern struct task_group root_task_group;
28478323f26cSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */
28489b5b7751SSrivatsa Vaddagiri 
284954e99124SDhaval Giani extern int task_can_switch_user(struct user_struct *up,
285054e99124SDhaval Giani 					struct task_struct *tsk);
285154e99124SDhaval Giani 
28524b98d11bSAlexey Dobriyan #ifdef CONFIG_TASK_XACCT
28534b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
28544b98d11bSAlexey Dobriyan {
2855940389b8SAndrea Righi 	tsk->ioac.rchar += amt;
28564b98d11bSAlexey Dobriyan }
28574b98d11bSAlexey Dobriyan 
28584b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
28594b98d11bSAlexey Dobriyan {
2860940389b8SAndrea Righi 	tsk->ioac.wchar += amt;
28614b98d11bSAlexey Dobriyan }
28624b98d11bSAlexey Dobriyan 
28634b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
28644b98d11bSAlexey Dobriyan {
2865940389b8SAndrea Righi 	tsk->ioac.syscr++;
28664b98d11bSAlexey Dobriyan }
28674b98d11bSAlexey Dobriyan 
28684b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
28694b98d11bSAlexey Dobriyan {
2870940389b8SAndrea Righi 	tsk->ioac.syscw++;
28714b98d11bSAlexey Dobriyan }
28724b98d11bSAlexey Dobriyan #else
28734b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
28744b98d11bSAlexey Dobriyan {
28754b98d11bSAlexey Dobriyan }
28764b98d11bSAlexey Dobriyan 
28774b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
28784b98d11bSAlexey Dobriyan {
28794b98d11bSAlexey Dobriyan }
28804b98d11bSAlexey Dobriyan 
28814b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
28824b98d11bSAlexey Dobriyan {
28834b98d11bSAlexey Dobriyan }
28844b98d11bSAlexey Dobriyan 
28854b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
28864b98d11bSAlexey Dobriyan {
28874b98d11bSAlexey Dobriyan }
28884b98d11bSAlexey Dobriyan #endif
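/*
 * Accounting sketch, mirroring what the read/write paths do: after a
 * read that returned `ret` bytes, charge the characters and the syscall.
 *
 *	if (ret > 0)
 *		add_rchar(current, ret);
 *	inc_syscr(current);
 */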
28894b98d11bSAlexey Dobriyan 
289082455257SDave Hansen #ifndef TASK_SIZE_OF
289182455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
289282455257SDave Hansen #endif
289382455257SDave Hansen 
2894cf475ad2SBalbir Singh #ifdef CONFIG_MM_OWNER
2895cf475ad2SBalbir Singh extern void mm_update_next_owner(struct mm_struct *mm);
2896cf475ad2SBalbir Singh extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2897cf475ad2SBalbir Singh #else
2898cf475ad2SBalbir Singh static inline void mm_update_next_owner(struct mm_struct *mm)
2899cf475ad2SBalbir Singh {
2900cf475ad2SBalbir Singh }
2901cf475ad2SBalbir Singh 
2902cf475ad2SBalbir Singh static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2903cf475ad2SBalbir Singh {
2904cf475ad2SBalbir Singh }
2905cf475ad2SBalbir Singh #endif /* CONFIG_MM_OWNER */
2906cf475ad2SBalbir Singh 
29073e10e716SJiri Slaby static inline unsigned long task_rlimit(const struct task_struct *tsk,
29083e10e716SJiri Slaby 		unsigned int limit)
29093e10e716SJiri Slaby {
29103e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
29113e10e716SJiri Slaby }
29123e10e716SJiri Slaby 
29133e10e716SJiri Slaby static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
29143e10e716SJiri Slaby 		unsigned int limit)
29153e10e716SJiri Slaby {
29163e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
29173e10e716SJiri Slaby }
29183e10e716SJiri Slaby 
29193e10e716SJiri Slaby static inline unsigned long rlimit(unsigned int limit)
29203e10e716SJiri Slaby {
29213e10e716SJiri Slaby 	return task_rlimit(current, limit);
29223e10e716SJiri Slaby }
29233e10e716SJiri Slaby 
29243e10e716SJiri Slaby static inline unsigned long rlimit_max(unsigned int limit)
29253e10e716SJiri Slaby {
29263e10e716SJiri Slaby 	return task_rlimit_max(current, limit);
29273e10e716SJiri Slaby }
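/*
 * Limit-check sketch, modelled on the mlock-style pattern; `npages` is a
 * placeholder for the caller's request.
 *
 *	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *	if (npages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -ENOMEM;
 */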
29283e10e716SJiri Slaby 
29291da177e4SLinus Torvalds #endif
2930