#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt_mask.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */

/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param cannot be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant to describe a so-called
 * sporadic time-constrained task. In such a model a task is specified by:
 *  - the activation period or minimum instance inter-arrival time;
 *  - the maximum (or average, depending on the actual scheduling
 *    discipline) computation time of all instances, a.k.a. runtime;
 *  - the deadline (relative to the actual activation time) of each
 *    instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation, typically called an instance, (at most)
 * every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 *  @size		size of the structure, for fwd/bwd compat.
 *
 *  @sched_policy	task's scheduling policy
 *  @sched_flags	for customizing the scheduler behaviour
 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
 *  @sched_deadline	representative of the task's deadline
 *  @sched_runtime	representative of the task's runtime
 *  @sched_period	representative of the task's period
 *
 * Given this task model, there is a multiplicity of scheduling algorithms
 * and policies that can be used to ensure all the tasks meet their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm
 * is available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	/* SCHED_NORMAL, SCHED_BATCH */
	s32 sched_nice;

	/* SCHED_FIFO, SCHED_RR */
	u32 sched_priority;

	/* SCHED_DEADLINE */
	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};
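
/*
 * Illustrative sketch (not part of this header): how userspace might
 * fill in a sched_attr to ask for SCHED_DEADLINE via the
 * sched_setattr() syscall.  The numbers are made up; the invariant
 * runtime <= deadline <= period must hold, all values in nanoseconds:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10000000,	(10ms of budget...)
 *		.sched_deadline	=  30000000,	(...due within 30ms...)
 *		.sched_period	= 100000000,	(...out of each 100ms)
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))	(pid 0 == self)
 *		perror("sched_setattr");
 */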

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;

#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

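/*
 * Worked example (illustrative only): with FSHIFT == 11, one CALC_LOAD
 * step computes load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT,
 * an exponentially weighted moving average.  For the 1-minute average,
 * exp == EXP_1 == 1884, so each 5-second tick keeps 1884/2048 (~92%)
 * of the old value and blends in ~8% of n, the instantaneous count of
 * active tasks scaled by FIXED_1.  E.g. updating a 1.00 average
 * (load == 2048) with two active tasks (n == 4096) gives
 * (2048 * 1884 + 4096 * 164) >> 11 == 2212, i.e. about 1.08.
 */
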
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_STATE_MAX		1024

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
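
/*
 * Illustrative sketch of the canonical sleep/wakeup loop built on
 * set_current_state() (the condition name is made up):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition_is_true)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The barrier in set_current_state() pairs with the waker, which sets
 * the condition and then calls wake_up_process(), so a wakeup arriving
 * between the state write and the condition test cannot be lost.
 */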

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(int pinned);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
static inline int get_nohz_timer_target(int pinned)
{
	return smp_processor_id();
}
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. Callers
 * that use it to check for privilege transitions must test against
 * SUID_DUMP_USER rather than treating it as a boolean value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}
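
/*
 * Example check (illustrative): a privilege-transition test must
 * compare against SUID_DUMP_USER explicitly, since SUID_DUMP_ROOT (2)
 * is also non-zero and would slip through a naive boolean test:
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		return -EPERM;	(e.g. refuse to attach)
 */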

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
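
/*
 * Illustrative bit layout, derived from the definitions above: the
 * dumpable state occupies bits 0-1 and the coredump filter bits 2-8,
 * so with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 *
 *	MMF_DUMP_FILTER_DEFAULT == (1 << 2) | (1 << 3) | (1 << 7) | (1 << 6)
 *				== 0xcc
 *
 * i.e. anonymous private/shared mappings, private hugetlb mappings and
 * ELF headers are dumped by default.
 */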
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
struct cputime {
	cputime_t utime;
	cputime_t stime;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
#else
#define PREEMPT_DISABLED	PREEMPT_ENABLED
#endif

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to prevent cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined to 0 and 1 respectively.
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	struct cputime prev_cputime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	unsigned audit_tty_log_passwd;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, more specifically, the setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path.  Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	oom_flags_t oom_flags;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add the following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	u64 blkio_start;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	u64 freepages_start;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of cpu_capacity calculations
 */
#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
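
/*
 * Illustrative sketch (assumed allocation pattern, not a declaration
 * from this header): because span[] is a variable-length tail, a
 * sched_domain is allocated with one cpumask worth of extra space
 * appended, e.g. (cpu_map standing in for the CPUs to cover):
 *
 *	struct sched_domain *sd;
 *
 *	sd = kzalloc(sizeof(*sd) + cpumask_size(), GFP_KERNEL);
 *	if (sd)
 *		cpumask_copy(sched_domain_span(sd), cpu_map);
 */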
991758b2cdcSRusty Russell 
992acc3f5d7SRusty Russell extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
9931d3504fcSHidetoshi Seto 				    struct sched_domain_attr *dattr_new);
994029190c5SPaul Jackson 
995acc3f5d7SRusty Russell /* Allocate an array of sched domains, for partition_sched_domains(). */
996acc3f5d7SRusty Russell cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
997acc3f5d7SRusty Russell void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
998acc3f5d7SRusty Russell 
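/*
 * Usage sketch (editor's addition): a minimal single-domain rebuild,
 * modelled on the pattern the cpuset code uses.  Error handling is
 * elided for brevity:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	cpumask_copy(doms[0], cpu_active_mask);
 *	partition_sched_domains(1, doms, NULL);
 *
 * partition_sched_domains() takes ownership of doms, so the caller
 * must not free_sched_domains() it afterwards.
 */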
99939be3501SPeter Zijlstra bool cpus_share_cache(int this_cpu, int that_cpu);
100039be3501SPeter Zijlstra 
1001143e1e28SVincent Guittot typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1002b6220ad6SGuenter Roeck typedef int (*sched_domain_flags_f)(void);
1003143e1e28SVincent Guittot 
1004143e1e28SVincent Guittot #define SDTL_OVERLAP	0x01
1005143e1e28SVincent Guittot 
1006143e1e28SVincent Guittot struct sd_data {
1007143e1e28SVincent Guittot 	struct sched_domain **__percpu sd;
1008143e1e28SVincent Guittot 	struct sched_group **__percpu sg;
100963b2ca30SNicolas Pitre 	struct sched_group_capacity **__percpu sgc;
1010143e1e28SVincent Guittot };
1011143e1e28SVincent Guittot 
1012143e1e28SVincent Guittot struct sched_domain_topology_level {
1013143e1e28SVincent Guittot 	sched_domain_mask_f mask;
1014143e1e28SVincent Guittot 	sched_domain_flags_f sd_flags;
1015143e1e28SVincent Guittot 	int		    flags;
1016143e1e28SVincent Guittot 	int		    numa_level;
1017143e1e28SVincent Guittot 	struct sd_data      data;
1018143e1e28SVincent Guittot #ifdef CONFIG_SCHED_DEBUG
1019143e1e28SVincent Guittot 	char                *name;
1020143e1e28SVincent Guittot #endif
1021143e1e28SVincent Guittot };
1022143e1e28SVincent Guittot 
1023143e1e28SVincent Guittot extern struct sched_domain_topology_level *sched_domain_topology;
1024143e1e28SVincent Guittot 
1025143e1e28SVincent Guittot extern void set_sched_topology(struct sched_domain_topology_level *tl);
1026143e1e28SVincent Guittot 
1027143e1e28SVincent Guittot #ifdef CONFIG_SCHED_DEBUG
1028143e1e28SVincent Guittot # define SD_INIT_NAME(type)		.name = #type
1029143e1e28SVincent Guittot #else
1030143e1e28SVincent Guittot # define SD_INIT_NAME(type)
1031143e1e28SVincent Guittot #endif
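/*
 * Usage sketch (editor's addition): an architecture can override the
 * default topology table via set_sched_topology().  A hypothetical
 * two-level table, following the shape of the default table:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 *
 * my_topology is a placeholder name; SD_INIT_NAME() expands to nothing
 * unless CONFIG_SCHED_DEBUG is set, as defined above.
 */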
1032143e1e28SVincent Guittot 
10331b427c15SIngo Molnar #else /* CONFIG_SMP */
10341da177e4SLinus Torvalds 
10351b427c15SIngo Molnar struct sched_domain_attr;
10361b427c15SIngo Molnar 
10371b427c15SIngo Molnar static inline void
1038acc3f5d7SRusty Russell partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
10391b427c15SIngo Molnar 			struct sched_domain_attr *dattr_new)
1040d02c7a8cSCon Kolivas {
1041d02c7a8cSCon Kolivas }
104239be3501SPeter Zijlstra 
104339be3501SPeter Zijlstra static inline bool cpus_share_cache(int this_cpu, int that_cpu)
104439be3501SPeter Zijlstra {
104539be3501SPeter Zijlstra 	return true;
104639be3501SPeter Zijlstra }
104739be3501SPeter Zijlstra 
10481b427c15SIngo Molnar #endif	/* !CONFIG_SMP */
10491da177e4SLinus Torvalds 
105047fe38fcSPeter Zijlstra 
10511da177e4SLinus Torvalds struct io_context;			/* See blkdev.h */
10521da177e4SLinus Torvalds 
10531da177e4SLinus Torvalds 
1054383f2835SChen, Kenneth W #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
105536c8b586SIngo Molnar extern void prefetch_stack(struct task_struct *t);
1056383f2835SChen, Kenneth W #else
1057383f2835SChen, Kenneth W static inline void prefetch_stack(struct task_struct *t) { }
1058383f2835SChen, Kenneth W #endif
10591da177e4SLinus Torvalds 
10601da177e4SLinus Torvalds struct audit_context;		/* See audit.c */
10611da177e4SLinus Torvalds struct mempolicy;
1062b92ce558SJens Axboe struct pipe_inode_info;
10634865ecf1SSerge E. Hallyn struct uts_namespace;
10641da177e4SLinus Torvalds 
106520b8a59fSIngo Molnar struct load_weight {
10669dbdb155SPeter Zijlstra 	unsigned long weight;
10679dbdb155SPeter Zijlstra 	u32 inv_weight;
106820b8a59fSIngo Molnar };
106920b8a59fSIngo Molnar 
10709d85f21cSPaul Turner struct sched_avg {
10719d85f21cSPaul Turner 	/*
10729d85f21cSPaul Turner 	 * These sums represent an infinite geometric series and so are bound
1073239003eaSKamalesh Babulal 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
10749d85f21cSPaul Turner 	 * choices of y < 1-2^(-32)*1024.
10759d85f21cSPaul Turner 	 */
10769d85f21cSPaul Turner 	u32 runnable_avg_sum, runnable_avg_period;
10779d85f21cSPaul Turner 	u64 last_runnable_update;
10789ee474f5SPaul Turner 	s64 decay_count;
10792dac754eSPaul Turner 	unsigned long load_avg_contrib;
10809d85f21cSPaul Turner };
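/*
 * Worked example (editor's note, not in the original header): the fair
 * class picks y such that y^32 = 1/2, i.e. y ~= 0.97857.  The bound
 * quoted above is then
 *
 *	1024/(1 - y) ~= 1024/0.02143 ~= 47788
 *
 * which comfortably fits in a u32; the integer-truncated series the
 * kernel actually computes converges to the LOAD_AVG_MAX constant
 * (47742) used by the fair-class code.
 */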
10819d85f21cSPaul Turner 
108294c18227SIngo Molnar #ifdef CONFIG_SCHEDSTATS
108341acab88SLucas De Marchi struct sched_statistics {
108494c18227SIngo Molnar 	u64			wait_start;
108594c18227SIngo Molnar 	u64			wait_max;
10866d082592SArjan van de Ven 	u64			wait_count;
10876d082592SArjan van de Ven 	u64			wait_sum;
10888f0dfc34SArjan van de Ven 	u64			iowait_count;
10898f0dfc34SArjan van de Ven 	u64			iowait_sum;
109094c18227SIngo Molnar 
109194c18227SIngo Molnar 	u64			sleep_start;
109220b8a59fSIngo Molnar 	u64			sleep_max;
109394c18227SIngo Molnar 	s64			sum_sleep_runtime;
109494c18227SIngo Molnar 
109594c18227SIngo Molnar 	u64			block_start;
109620b8a59fSIngo Molnar 	u64			block_max;
109720b8a59fSIngo Molnar 	u64			exec_max;
1098eba1ed4bSIngo Molnar 	u64			slice_max;
1099cc367732SIngo Molnar 
1100cc367732SIngo Molnar 	u64			nr_migrations_cold;
1101cc367732SIngo Molnar 	u64			nr_failed_migrations_affine;
1102cc367732SIngo Molnar 	u64			nr_failed_migrations_running;
1103cc367732SIngo Molnar 	u64			nr_failed_migrations_hot;
1104cc367732SIngo Molnar 	u64			nr_forced_migrations;
1105cc367732SIngo Molnar 
1106cc367732SIngo Molnar 	u64			nr_wakeups;
1107cc367732SIngo Molnar 	u64			nr_wakeups_sync;
1108cc367732SIngo Molnar 	u64			nr_wakeups_migrate;
1109cc367732SIngo Molnar 	u64			nr_wakeups_local;
1110cc367732SIngo Molnar 	u64			nr_wakeups_remote;
1111cc367732SIngo Molnar 	u64			nr_wakeups_affine;
1112cc367732SIngo Molnar 	u64			nr_wakeups_affine_attempts;
1113cc367732SIngo Molnar 	u64			nr_wakeups_passive;
1114cc367732SIngo Molnar 	u64			nr_wakeups_idle;
111541acab88SLucas De Marchi };
111641acab88SLucas De Marchi #endif
111741acab88SLucas De Marchi 
111841acab88SLucas De Marchi struct sched_entity {
111941acab88SLucas De Marchi 	struct load_weight	load;		/* for load-balancing */
112041acab88SLucas De Marchi 	struct rb_node		run_node;
112141acab88SLucas De Marchi 	struct list_head	group_node;
112241acab88SLucas De Marchi 	unsigned int		on_rq;
112341acab88SLucas De Marchi 
112441acab88SLucas De Marchi 	u64			exec_start;
112541acab88SLucas De Marchi 	u64			sum_exec_runtime;
112641acab88SLucas De Marchi 	u64			vruntime;
112741acab88SLucas De Marchi 	u64			prev_sum_exec_runtime;
112841acab88SLucas De Marchi 
112941acab88SLucas De Marchi 	u64			nr_migrations;
113041acab88SLucas De Marchi 
113141acab88SLucas De Marchi #ifdef CONFIG_SCHEDSTATS
113241acab88SLucas De Marchi 	struct sched_statistics statistics;
113394c18227SIngo Molnar #endif
113494c18227SIngo Molnar 
113520b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
1136fed14d45SPeter Zijlstra 	int			depth;
113720b8a59fSIngo Molnar 	struct sched_entity	*parent;
113820b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
113920b8a59fSIngo Molnar 	struct cfs_rq		*cfs_rq;
114020b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
114120b8a59fSIngo Molnar 	struct cfs_rq		*my_q;
114220b8a59fSIngo Molnar #endif
11438bd75c77SClark Williams 
1144141965c7SAlex Shi #ifdef CONFIG_SMP
1145f4e26b12SPaul Turner 	/* Per-entity load-tracking */
11469d85f21cSPaul Turner 	struct sched_avg	avg;
11479d85f21cSPaul Turner #endif
114820b8a59fSIngo Molnar };
114970b97a7fSIngo Molnar 
1150fa717060SPeter Zijlstra struct sched_rt_entity {
1151fa717060SPeter Zijlstra 	struct list_head run_list;
115278f2c7dbSPeter Zijlstra 	unsigned long timeout;
115357d2aa00SYing Xue 	unsigned long watchdog_stamp;
1154bee367edSRichard Kennedy 	unsigned int time_slice;
11556f505b16SPeter Zijlstra 
115658d6c2d7SPeter Zijlstra 	struct sched_rt_entity *back;
1157052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
11586f505b16SPeter Zijlstra 	struct sched_rt_entity	*parent;
11596f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
11606f505b16SPeter Zijlstra 	struct rt_rq		*rt_rq;
11616f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
11626f505b16SPeter Zijlstra 	struct rt_rq		*my_q;
11636f505b16SPeter Zijlstra #endif
1164fa717060SPeter Zijlstra };
1165fa717060SPeter Zijlstra 
1166aab03e05SDario Faggioli struct sched_dl_entity {
1167aab03e05SDario Faggioli 	struct rb_node	rb_node;
1168aab03e05SDario Faggioli 
1169aab03e05SDario Faggioli 	/*
1170aab03e05SDario Faggioli 	 * Original scheduling parameters. Copied here from sched_attr
11714027d080Sxiaofeng.yan 	 * during sched_setattr(), they will remain the same until
11724027d080Sxiaofeng.yan 	 * the next sched_setattr().
1173aab03e05SDario Faggioli 	 */
1174aab03e05SDario Faggioli 	u64 dl_runtime;		/* maximum runtime for each instance	*/
1175aab03e05SDario Faggioli 	u64 dl_deadline;	/* relative deadline of each instance	*/
1176755378a4SHarald Gustafsson 	u64 dl_period;		/* separation of two instances (period) */
1177332ac17eSDario Faggioli 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1178aab03e05SDario Faggioli 
1179aab03e05SDario Faggioli 	/*
1180aab03e05SDario Faggioli 	 * Actual scheduling parameters. Initialized with the values above,
1181aab03e05SDario Faggioli 	 * they are continuously updated during task execution. Note that
1182aab03e05SDario Faggioli 	 * the remaining runtime could be < 0 in case we are in overrun.
1183aab03e05SDario Faggioli 	 */
1184aab03e05SDario Faggioli 	s64 runtime;		/* remaining runtime for this instance	*/
1185aab03e05SDario Faggioli 	u64 deadline;		/* absolute deadline for this instance	*/
1186aab03e05SDario Faggioli 	unsigned int flags;	/* specifying the scheduler behaviour	*/
1187aab03e05SDario Faggioli 
1188aab03e05SDario Faggioli 	/*
1189aab03e05SDario Faggioli 	 * Some bool flags:
1190aab03e05SDario Faggioli 	 *
1191aab03e05SDario Faggioli 	 * @dl_throttled tells if we exhausted the runtime. If so, the
1192aab03e05SDario Faggioli 	 * task has to wait for a replenishment to be performed at the
1193aab03e05SDario Faggioli 	 * next firing of dl_timer.
1194aab03e05SDario Faggioli 	 *
1195aab03e05SDario Faggioli 	 * @dl_new tells if a new instance arrived. If so we must
1196aab03e05SDario Faggioli 	 * start executing it with full runtime and reset its absolute
1197aab03e05SDario Faggioli 	 * deadline;
11982d3d891dSDario Faggioli 	 *
11992d3d891dSDario Faggioli 	 * @dl_boosted tells if we are boosted due to DI (deadline inheritance).
12002d3d891dSDario Faggioli 	 * If so we are outside the bandwidth enforcement mechanism (but only until we
12015bfd126eSJuri Lelli 	 * exit the critical section);
12025bfd126eSJuri Lelli 	 *
12035bfd126eSJuri Lelli 	 * @dl_yielded tells if task gave up the cpu before consuming
12045bfd126eSJuri Lelli 	 * all its available runtime during the last job.
1205aab03e05SDario Faggioli 	 */
12065bfd126eSJuri Lelli 	int dl_throttled, dl_new, dl_boosted, dl_yielded;
1207aab03e05SDario Faggioli 
1208aab03e05SDario Faggioli 	/*
1209aab03e05SDario Faggioli 	 * Bandwidth enforcement timer. Each -deadline task has its
1210aab03e05SDario Faggioli 	 * own bandwidth to be enforced, thus we need one timer per task.
1211aab03e05SDario Faggioli 	 */
1212aab03e05SDario Faggioli 	struct hrtimer dl_timer;
1213aab03e05SDario Faggioli };
12148bd75c77SClark Williams 
1215*1d082fd0SPaul E. McKenney union rcu_special {
1216*1d082fd0SPaul E. McKenney 	struct {
1217*1d082fd0SPaul E. McKenney 		bool blocked;
1218*1d082fd0SPaul E. McKenney 		bool need_qs;
1219*1d082fd0SPaul E. McKenney 	} b;
1220*1d082fd0SPaul E. McKenney 	short s;
1221*1d082fd0SPaul E. McKenney };
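/*
 * Editor's note: the point of the union is that RCU can test (and
 * clear) both bool flags in a single access.  A hypothetical fast
 * path, assuming t is a task_struct pointer:
 *
 *	if (t->rcu_read_unlock_special.s)
 *		take_the_slow_path(t);	  placeholder for the real handler
 *
 * while slow-path code can still address .b.blocked and .b.need_qs
 * individually.
 */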
122286848966SPaul E. McKenney struct rcu_node;
122386848966SPaul E. McKenney 
12248dc85d54SPeter Zijlstra enum perf_event_task_context {
12258dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
12268dc85d54SPeter Zijlstra 	perf_hw_context = 0,
122789a1e187SPeter Zijlstra 	perf_sw_context,
12288dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
12298dc85d54SPeter Zijlstra };
12308dc85d54SPeter Zijlstra 
12311da177e4SLinus Torvalds struct task_struct {
12321da177e4SLinus Torvalds 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1233f7e4217bSRoman Zippel 	void *stack;
12341da177e4SLinus Torvalds 	atomic_t usage;
123597dc32cdSWilliam Cohen 	unsigned int flags;	/* per process flags, defined below */
123697dc32cdSWilliam Cohen 	unsigned int ptrace;
12371da177e4SLinus Torvalds 
12382dd73a4fSPeter Williams #ifdef CONFIG_SMP
1239fa14ff4aSPeter Zijlstra 	struct llist_node wake_entry;
12403ca7a440SPeter Zijlstra 	int on_cpu;
124162470419SMichael Wang 	struct task_struct *last_wakee;
124262470419SMichael Wang 	unsigned long wakee_flips;
124362470419SMichael Wang 	unsigned long wakee_flip_decay_ts;
1244ac66f547SPeter Zijlstra 
1245ac66f547SPeter Zijlstra 	int wake_cpu;
12464866cde0SNick Piggin #endif
1247fd2f4419SPeter Zijlstra 	int on_rq;
124850e645a8SIngo Molnar 
1249b29739f9SIngo Molnar 	int prio, static_prio, normal_prio;
1250c7aceabaSRichard Kennedy 	unsigned int rt_priority;
12515522d5d5SIngo Molnar 	const struct sched_class *sched_class;
125220b8a59fSIngo Molnar 	struct sched_entity se;
1253fa717060SPeter Zijlstra 	struct sched_rt_entity rt;
12548323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
12558323f26cSPeter Zijlstra 	struct task_group *sched_task_group;
12568323f26cSPeter Zijlstra #endif
1257aab03e05SDario Faggioli 	struct sched_dl_entity dl;
12581da177e4SLinus Torvalds 
1259e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
1260e107be36SAvi Kivity 	/* list of struct preempt_notifier: */
1261e107be36SAvi Kivity 	struct hlist_head preempt_notifiers;
1262e107be36SAvi Kivity #endif
1263e107be36SAvi Kivity 
12646c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
12652056a782SJens Axboe 	unsigned int btrace_seq;
12666c5c9341SAlexey Dobriyan #endif
12671da177e4SLinus Torvalds 
126897dc32cdSWilliam Cohen 	unsigned int policy;
126929baa747SPeter Zijlstra 	int nr_cpus_allowed;
12701da177e4SLinus Torvalds 	cpumask_t cpus_allowed;
12711da177e4SLinus Torvalds 
1272a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1273e260be67SPaul E. McKenney 	int rcu_read_lock_nesting;
1274*1d082fd0SPaul E. McKenney 	union rcu_special rcu_read_unlock_special;
1275f41d911fSPaul E. McKenney 	struct list_head rcu_node_entry;
1276a57eb940SPaul E. McKenney #endif /* #ifdef CONFIG_PREEMPT_RCU */
1277a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1278a57eb940SPaul E. McKenney 	struct rcu_node *rcu_blocked_node;
1279f41d911fSPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
12808315f422SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
12818315f422SPaul E. McKenney 	unsigned long rcu_tasks_nvcsw;
12828315f422SPaul E. McKenney 	bool rcu_tasks_holdout;
12838315f422SPaul E. McKenney 	struct list_head rcu_tasks_holdout_list;
1284176f8f7aSPaul E. McKenney 	int rcu_tasks_idle_cpu;
12858315f422SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RCU */
1286e260be67SPaul E. McKenney 
128752f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
12881da177e4SLinus Torvalds 	struct sched_info sched_info;
12891da177e4SLinus Torvalds #endif
12901da177e4SLinus Torvalds 
12911da177e4SLinus Torvalds 	struct list_head tasks;
1292806c09a7SDario Faggioli #ifdef CONFIG_SMP
1293917b627dSGregory Haskins 	struct plist_node pushable_tasks;
12941baca4ceSJuri Lelli 	struct rb_node pushable_dl_tasks;
1295806c09a7SDario Faggioli #endif
12961da177e4SLinus Torvalds 
12971da177e4SLinus Torvalds 	struct mm_struct *mm, *active_mm;
12984471a675SJiri Kosina #ifdef CONFIG_COMPAT_BRK
12994471a675SJiri Kosina 	unsigned brk_randomized:1;
13004471a675SJiri Kosina #endif
1301615d6e87SDavidlohr Bueso 	/* per-thread vma caching */
1302615d6e87SDavidlohr Bueso 	u32 vmacache_seqnum;
1303615d6e87SDavidlohr Bueso 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
130434e55232SKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
130534e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat	rss_stat;
130634e55232SKAMEZAWA Hiroyuki #endif
13071da177e4SLinus Torvalds /* task state */
130897dc32cdSWilliam Cohen 	int exit_state;
13091da177e4SLinus Torvalds 	int exit_code, exit_signal;
13101da177e4SLinus Torvalds 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1311a8f072c1STejun Heo 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
13129b89f6baSAndrei Epure 
13139b89f6baSAndrei Epure 	/* Used for emulating ABI behavior of previous Linux versions */
131497dc32cdSWilliam Cohen 	unsigned int personality;
13159b89f6baSAndrei Epure 
1316f9ce1f1cSKentaro Takeda 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1317f9ce1f1cSKentaro Takeda 				 * execve */
13188f0dfc34SArjan van de Ven 	unsigned in_iowait:1;
13198f0dfc34SArjan van de Ven 
1320ca94c442SLennart Poettering 	/* Revert to default priority/policy when forking */
1321ca94c442SLennart Poettering 	unsigned sched_reset_on_fork:1;
1322a8e4f2eaSPeter Zijlstra 	unsigned sched_contributes_to_load:1;
1323ca94c442SLennart Poettering 
13241d4457f9SKees Cook 	unsigned long atomic_flags; /* Flags needing atomic access. */
13251d4457f9SKees Cook 
13261da177e4SLinus Torvalds 	pid_t pid;
13271da177e4SLinus Torvalds 	pid_t tgid;
13280a425405SArjan van de Ven 
13291314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
13300a425405SArjan van de Ven 	/* Canary value for the -fstack-protector gcc feature */
13310a425405SArjan van de Ven 	unsigned long stack_canary;
13321314562aSHiroshi Shimamoto #endif
13331da177e4SLinus Torvalds 	/*
13341da177e4SLinus Torvalds 	 * pointers to (original) parent process, youngest child, younger sibling,
13351da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
1336f470021aSRoland McGrath 	 * p->real_parent->pid)
13371da177e4SLinus Torvalds 	 */
1338abd63bc3SKees Cook 	struct task_struct __rcu *real_parent; /* real parent process */
1339abd63bc3SKees Cook 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
13401da177e4SLinus Torvalds 	/*
1341f470021aSRoland McGrath 	 * children/sibling forms the list of my natural children
13421da177e4SLinus Torvalds 	 */
13431da177e4SLinus Torvalds 	struct list_head children;	/* list of my children */
13441da177e4SLinus Torvalds 	struct list_head sibling;	/* linkage in my parent's children list */
13451da177e4SLinus Torvalds 	struct task_struct *group_leader;	/* threadgroup leader */
13461da177e4SLinus Torvalds 
1347f470021aSRoland McGrath 	/*
1348f470021aSRoland McGrath 	 * ptraced is the list of tasks this task is using ptrace on.
1349f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
1350f470021aSRoland McGrath 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1351f470021aSRoland McGrath 	 */
1352f470021aSRoland McGrath 	struct list_head ptraced;
1353f470021aSRoland McGrath 	struct list_head ptrace_entry;
1354f470021aSRoland McGrath 
13551da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
135692476d7fSEric W. Biederman 	struct pid_link pids[PIDTYPE_MAX];
135747e65328SOleg Nesterov 	struct list_head thread_group;
13580c740d0aSOleg Nesterov 	struct list_head thread_node;
13591da177e4SLinus Torvalds 
13601da177e4SLinus Torvalds 	struct completion *vfork_done;		/* for vfork() */
13611da177e4SLinus Torvalds 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
13621da177e4SLinus Torvalds 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
13631da177e4SLinus Torvalds 
1364c66f08beSMichael Neuling 	cputime_t utime, stime, utimescaled, stimescaled;
13659ac52315SLaurent Vivier 	cputime_t gtime;
13669fbc42eaSFrederic Weisbecker #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1367d37f761dSFrederic Weisbecker 	struct cputime prev_cputime;
1368d99ca3b9SHidetoshi Seto #endif
13696a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
13706a61671bSFrederic Weisbecker 	seqlock_t vtime_seqlock;
13716a61671bSFrederic Weisbecker 	unsigned long long vtime_snap;
13726a61671bSFrederic Weisbecker 	enum {
13736a61671bSFrederic Weisbecker 		VTIME_SLEEPING = 0,
13746a61671bSFrederic Weisbecker 		VTIME_USER,
13756a61671bSFrederic Weisbecker 		VTIME_SYS,
13766a61671bSFrederic Weisbecker 	} vtime_snap_whence;
13776a61671bSFrederic Weisbecker #endif
13781da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw; /* context switch counts */
1379ccbf62d8SThomas Gleixner 	u64 start_time;		/* monotonic time in nsec */
138057e0be04SThomas Gleixner 	u64 real_start_time;	/* boot based time in nsec */
13811da177e4SLinus Torvalds /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
13821da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt;
13831da177e4SLinus Torvalds 
1384f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
13851da177e4SLinus Torvalds 	struct list_head cpu_timers[3];
13861da177e4SLinus Torvalds 
13871da177e4SLinus Torvalds /* process credentials */
13881b0ba1c9SArnd Bergmann 	const struct cred __rcu *real_cred; /* objective and real subjective task
13893b11a1deSDavid Howells 					 * credentials (COW) */
13901b0ba1c9SArnd Bergmann 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
13913b11a1deSDavid Howells 					 * credentials (COW) */
139236772092SPaolo 'Blaisorblade' Giarrusso 	char comm[TASK_COMM_LEN]; /* executable name excluding path
139336772092SPaolo 'Blaisorblade' Giarrusso 				     - access with [gs]et_task_comm (which lock
139436772092SPaolo 'Blaisorblade' Giarrusso 				       it with task_lock())
1395221af7f8SLinus Torvalds 				     - initialized normally by setup_new_exec */
13961da177e4SLinus Torvalds /* file system info */
13971da177e4SLinus Torvalds 	int link_count, total_link_count;
13983d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
13991da177e4SLinus Torvalds /* ipc stuff */
14001da177e4SLinus Torvalds 	struct sysv_sem sysvsem;
1401ab602f79SJack Miller 	struct sysv_shm sysvshm;
14023d5b6fccSAlexey Dobriyan #endif
1403e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
140482a1fcb9SIngo Molnar /* hung task detection */
140582a1fcb9SIngo Molnar 	unsigned long last_switch_count;
140682a1fcb9SIngo Molnar #endif
14071da177e4SLinus Torvalds /* CPU-specific state of this task */
14081da177e4SLinus Torvalds 	struct thread_struct thread;
14091da177e4SLinus Torvalds /* filesystem information */
14101da177e4SLinus Torvalds 	struct fs_struct *fs;
14111da177e4SLinus Torvalds /* open file information */
14121da177e4SLinus Torvalds 	struct files_struct *files;
14131651e14eSSerge E. Hallyn /* namespaces */
1414ab516013SSerge E. Hallyn 	struct nsproxy *nsproxy;
14151da177e4SLinus Torvalds /* signal handlers */
14161da177e4SLinus Torvalds 	struct signal_struct *signal;
14171da177e4SLinus Torvalds 	struct sighand_struct *sighand;
14181da177e4SLinus Torvalds 
14191da177e4SLinus Torvalds 	sigset_t blocked, real_blocked;
1420f3de272bSRoland McGrath 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
14211da177e4SLinus Torvalds 	struct sigpending pending;
14221da177e4SLinus Torvalds 
14231da177e4SLinus Torvalds 	unsigned long sas_ss_sp;
14241da177e4SLinus Torvalds 	size_t sas_ss_size;
14251da177e4SLinus Torvalds 	int (*notifier)(void *priv);
14261da177e4SLinus Torvalds 	void *notifier_data;
14271da177e4SLinus Torvalds 	sigset_t *notifier_mask;
142867d12145SAl Viro 	struct callback_head *task_works;
1429e73f8959SOleg Nesterov 
14301da177e4SLinus Torvalds 	struct audit_context *audit_context;
1431bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
1432e1760bd5SEric W. Biederman 	kuid_t loginuid;
14334746ec5bSEric Paris 	unsigned int sessionid;
1434bfef93a5SAl Viro #endif
1435932ecebbSWill Drewry 	struct seccomp seccomp;
14361da177e4SLinus Torvalds 
14371da177e4SLinus Torvalds /* Thread group tracking */
14381da177e4SLinus Torvalds    	u32 parent_exec_id;
14391da177e4SLinus Torvalds    	u32 self_exec_id;
144058568d2aSMiao Xie /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
144158568d2aSMiao Xie  * mempolicy */
14421da177e4SLinus Torvalds 	spinlock_t alloc_lock;
14431da177e4SLinus Torvalds 
1444b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
14451d615482SThomas Gleixner 	raw_spinlock_t pi_lock;
1446b29739f9SIngo Molnar 
144723f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
144823f78d4aSIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task */
1449fb00aca4SPeter Zijlstra 	struct rb_root pi_waiters;
1450fb00aca4SPeter Zijlstra 	struct rb_node *pi_waiters_leftmost;
145123f78d4aSIngo Molnar 	/* Deadlock detection and priority inheritance handling */
145223f78d4aSIngo Molnar 	struct rt_mutex_waiter *pi_blocked_on;
145323f78d4aSIngo Molnar #endif
145423f78d4aSIngo Molnar 
1455408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
1456408894eeSIngo Molnar 	/* mutex deadlock detection */
1457408894eeSIngo Molnar 	struct mutex_waiter *blocked_on;
1458408894eeSIngo Molnar #endif
1459de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
1460de30a2b3SIngo Molnar 	unsigned int irq_events;
1461de30a2b3SIngo Molnar 	unsigned long hardirq_enable_ip;
1462de30a2b3SIngo Molnar 	unsigned long hardirq_disable_ip;
1463fa1452e8SHiroshi Shimamoto 	unsigned int hardirq_enable_event;
1464de30a2b3SIngo Molnar 	unsigned int hardirq_disable_event;
1465fa1452e8SHiroshi Shimamoto 	int hardirqs_enabled;
1466de30a2b3SIngo Molnar 	int hardirq_context;
1467fa1452e8SHiroshi Shimamoto 	unsigned long softirq_disable_ip;
1468fa1452e8SHiroshi Shimamoto 	unsigned long softirq_enable_ip;
1469fa1452e8SHiroshi Shimamoto 	unsigned int softirq_disable_event;
1470fa1452e8SHiroshi Shimamoto 	unsigned int softirq_enable_event;
1471fa1452e8SHiroshi Shimamoto 	int softirqs_enabled;
1472de30a2b3SIngo Molnar 	int softirq_context;
1473de30a2b3SIngo Molnar #endif
1474fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
1475bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
1476fbb9ce95SIngo Molnar 	u64 curr_chain_key;
1477fbb9ce95SIngo Molnar 	int lockdep_depth;
1478fbb9ce95SIngo Molnar 	unsigned int lockdep_recursion;
1479c7aceabaSRichard Kennedy 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1480cf40bd16SNick Piggin 	gfp_t lockdep_reclaim_gfp;
1481fbb9ce95SIngo Molnar #endif
1482408894eeSIngo Molnar 
14831da177e4SLinus Torvalds /* journalling filesystem info */
14841da177e4SLinus Torvalds 	void *journal_info;
14851da177e4SLinus Torvalds 
1486d89d8796SNeil Brown /* stacked block device info */
1487bddd87c7SAkinobu Mita 	struct bio_list *bio_list;
1488d89d8796SNeil Brown 
148973c10101SJens Axboe #ifdef CONFIG_BLOCK
149073c10101SJens Axboe /* stack plugging */
149173c10101SJens Axboe 	struct blk_plug *plug;
149273c10101SJens Axboe #endif
149373c10101SJens Axboe 
14941da177e4SLinus Torvalds /* VM state */
14951da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state;
14961da177e4SLinus Torvalds 
14971da177e4SLinus Torvalds 	struct backing_dev_info *backing_dev_info;
14981da177e4SLinus Torvalds 
14991da177e4SLinus Torvalds 	struct io_context *io_context;
15001da177e4SLinus Torvalds 
15011da177e4SLinus Torvalds 	unsigned long ptrace_message;
15021da177e4SLinus Torvalds 	siginfo_t *last_siginfo; /* For ptrace use.  */
15037c3ab738SAndrew Morton 	struct task_io_accounting ioac;
15048f0ab514SJay Lan #if defined(CONFIG_TASK_XACCT)
15051da177e4SLinus Torvalds 	u64 acct_rss_mem1;	/* accumulated rss usage */
15061da177e4SLinus Torvalds 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
150749b5cf34SJonathan Lim 	cputime_t acct_timexpd;	/* stime + utime since last update */
15081da177e4SLinus Torvalds #endif
15091da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
151058568d2aSMiao Xie 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1511cc9a6c87SMel Gorman 	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
1512825a46afSPaul Jackson 	int cpuset_mem_spread_rotor;
15136adef3ebSJack Steiner 	int cpuset_slab_spread_rotor;
15141da177e4SLinus Torvalds #endif
1515ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
1516817929ecSPaul Menage 	/* Control Group info protected by css_set_lock */
15172c392b8cSArnd Bergmann 	struct css_set __rcu *cgroups;
1518817929ecSPaul Menage 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1519817929ecSPaul Menage 	struct list_head cg_list;
1520ddbcc7e8SPaul Menage #endif
152142b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
15220771dfefSIngo Molnar 	struct robust_list_head __user *robust_list;
152334f192c6SIngo Molnar #ifdef CONFIG_COMPAT
152434f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
152534f192c6SIngo Molnar #endif
1526c87e2837SIngo Molnar 	struct list_head pi_state_list;
1527c87e2837SIngo Molnar 	struct futex_pi_state *pi_state_cache;
152842b2dd0aSAlexey Dobriyan #endif
1529cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
15308dc85d54SPeter Zijlstra 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1531cdd6c482SIngo Molnar 	struct mutex perf_event_mutex;
1532cdd6c482SIngo Molnar 	struct list_head perf_event_list;
1533a63eaf34SPaul Mackerras #endif
15348f47b187SThomas Gleixner #ifdef CONFIG_DEBUG_PREEMPT
15358f47b187SThomas Gleixner 	unsigned long preempt_disable_ip;
15368f47b187SThomas Gleixner #endif
1537c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
153858568d2aSMiao Xie 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1539c7aceabaSRichard Kennedy 	short il_next;
1540207205a2SEric Dumazet 	short pref_node_fork;
1541c7aceabaSRichard Kennedy #endif
1542cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
1543cbee9f88SPeter Zijlstra 	int numa_scan_seq;
1544cbee9f88SPeter Zijlstra 	unsigned int numa_scan_period;
1545598f0ec0SMel Gorman 	unsigned int numa_scan_period_max;
1546de1c9ce6SRik van Riel 	int numa_preferred_nid;
15476b9a7460SMel Gorman 	unsigned long numa_migrate_retry;
1548cbee9f88SPeter Zijlstra 	u64 node_stamp;			/* migration stamp  */
15497e2703e6SRik van Riel 	u64 last_task_numa_placement;
15507e2703e6SRik van Riel 	u64 last_sum_exec_runtime;
1551cbee9f88SPeter Zijlstra 	struct callback_head numa_work;
1552f809ca9aSMel Gorman 
15538c8a743cSPeter Zijlstra 	struct list_head numa_entry;
15548c8a743cSPeter Zijlstra 	struct numa_group *numa_group;
15558c8a743cSPeter Zijlstra 
1556745d6147SMel Gorman 	/*
1557745d6147SMel Gorman 	 * Exponential decaying average of faults on a per-node basis.
1558745d6147SMel Gorman 	 * Scheduling placement decisions are made based on these counts.
1559745d6147SMel Gorman 	 * The values remain static for the duration of a PTE scan.
1560745d6147SMel Gorman 	 */
1561ff1df896SRik van Riel 	unsigned long *numa_faults_memory;
156283e1d2cdSMel Gorman 	unsigned long total_numa_faults;
1563745d6147SMel Gorman 
1564745d6147SMel Gorman 	/*
1565745d6147SMel Gorman 	 * numa_faults_buffer_memory records faults per node during the current
1566ff1df896SRik van Riel 	 * scan window. When the scan completes, the counts in
1567ff1df896SRik van Riel 	 * numa_faults_memory decay and these values are copied.
1568745d6147SMel Gorman 	 */
1569ff1df896SRik van Riel 	unsigned long *numa_faults_buffer_memory;
1570745d6147SMel Gorman 
157104bb2f94SRik van Riel 	/*
157250ec8a40SRik van Riel 	 * Track the nodes the process was running on when a NUMA hinting
157350ec8a40SRik van Riel 	 * fault was incurred.
157450ec8a40SRik van Riel 	 */
157550ec8a40SRik van Riel 	unsigned long *numa_faults_cpu;
157650ec8a40SRik van Riel 	unsigned long *numa_faults_buffer_cpu;
157750ec8a40SRik van Riel 
157850ec8a40SRik van Riel 	/*
157904bb2f94SRik van Riel 	 * numa_faults_locality tracks if faults recorded during the last
158004bb2f94SRik van Riel 	 * scan window were remote/local. The task scan period is adapted
158104bb2f94SRik van Riel 	 * based on the locality of the faults with different weights
158204bb2f94SRik van Riel 	 * depending on whether they were shared or private faults
158304bb2f94SRik van Riel 	 */
158404bb2f94SRik van Riel 	unsigned long numa_faults_locality[2];
158504bb2f94SRik van Riel 
1586b32e86b4SIngo Molnar 	unsigned long numa_pages_migrated;
1587cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
1588cbee9f88SPeter Zijlstra 
1589e56d0903SIngo Molnar 	struct rcu_head rcu;
1590b92ce558SJens Axboe 
1591b92ce558SJens Axboe 	/*
1592b92ce558SJens Axboe 	 * cache last used pipe for splice
1593b92ce558SJens Axboe 	 */
1594b92ce558SJens Axboe 	struct pipe_inode_info *splice_pipe;
15955640f768SEric Dumazet 
15965640f768SEric Dumazet 	struct page_frag task_frag;
15975640f768SEric Dumazet 
1598ca74e92bSShailabh Nagar #ifdef	CONFIG_TASK_DELAY_ACCT
1599ca74e92bSShailabh Nagar 	struct task_delay_info *delays;
1600ca74e92bSShailabh Nagar #endif
1601f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1602f4f154fdSAkinobu Mita 	int make_it_fail;
1603f4f154fdSAkinobu Mita #endif
16049d823e8fSWu Fengguang 	/*
16059d823e8fSWu Fengguang 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
16069d823e8fSWu Fengguang 	 * balance_dirty_pages() for some dirty throttling pause
16079d823e8fSWu Fengguang 	 */
16089d823e8fSWu Fengguang 	int nr_dirtied;
16099d823e8fSWu Fengguang 	int nr_dirtied_pause;
161083712358SWu Fengguang 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
16119d823e8fSWu Fengguang 
16129745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
16139745512cSArjan van de Ven 	int latency_record_count;
16149745512cSArjan van de Ven 	struct latency_record latency_record[LT_SAVECOUNT];
16159745512cSArjan van de Ven #endif
16166976675dSArjan van de Ven 	/*
16176976675dSArjan van de Ven 	 * time slack values; these are used to round up poll() and
16186976675dSArjan van de Ven 	 * select() etc timeout values. These are in nanoseconds.
16196976675dSArjan van de Ven 	 */
16206976675dSArjan van de Ven 	unsigned long timer_slack_ns;
16216976675dSArjan van de Ven 	unsigned long default_timer_slack_ns;
1622f8d570a4SDavid Miller 
1623fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
16243ad2f3fbSDaniel Mack 	/* Index of current stored address in ret_stack */
1625f201ae23SFrederic Weisbecker 	int curr_ret_stack;
1626f201ae23SFrederic Weisbecker 	/* Stack of return addresses for return function tracing */
1627f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack	*ret_stack;
16288aef2d28SSteven Rostedt 	/* time stamp for last schedule */
16298aef2d28SSteven Rostedt 	unsigned long long ftrace_timestamp;
1630f201ae23SFrederic Weisbecker 	/*
1631f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
1632f201ae23SFrederic Weisbecker 	 * because of depth overrun.
1633f201ae23SFrederic Weisbecker 	 */
1634f201ae23SFrederic Weisbecker 	atomic_t trace_overrun;
1635380c4b14SFrederic Weisbecker 	/* Pause for the tracing */
1636380c4b14SFrederic Weisbecker 	atomic_t tracing_graph_pause;
1637f201ae23SFrederic Weisbecker #endif
1638ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1639ea4e2bc4SSteven Rostedt 	/* state flags for use by tracers */
1640ea4e2bc4SSteven Rostedt 	unsigned long trace;
1641b1cff0adSSteven Rostedt 	/* bitmask and counter of trace recursion */
1642261842b7SSteven Rostedt 	unsigned long trace_recursion;
1643261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
1644c255a458SAndrew Morton #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
16450e9d92f2SGlauber Costa 	unsigned int memcg_kmem_skip_account;
1646519e5247SJohannes Weiner 	struct memcg_oom_info {
164749426420SJohannes Weiner 		struct mem_cgroup *memcg;
164849426420SJohannes Weiner 		gfp_t gfp_mask;
164949426420SJohannes Weiner 		int order;
1650519e5247SJohannes Weiner 		unsigned int may_oom:1;
1651519e5247SJohannes Weiner 	} memcg_oom;
1652569b846dSKAMEZAWA Hiroyuki #endif
16530326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
16540326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
16550326f5a9SSrikar Dronamraju #endif
1656cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1657cafe5635SKent Overstreet 	unsigned int	sequential_io;
1658cafe5635SKent Overstreet 	unsigned int	sequential_io_avg;
1659cafe5635SKent Overstreet #endif
16601da177e4SLinus Torvalds };
16611da177e4SLinus Torvalds 
166276e6eee0SRusty Russell /* Future-safe accessor for struct task_struct's cpus_allowed. */
1663a4636818SRusty Russell #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
166476e6eee0SRusty Russell 
16656688cc05SPeter Zijlstra #define TNF_MIGRATED	0x01
16666688cc05SPeter Zijlstra #define TNF_NO_GROUP	0x02
1667dabe1d99SRik van Riel #define TNF_SHARED	0x04
166804bb2f94SRik van Riel #define TNF_FAULT_LOCAL	0x08
16696688cc05SPeter Zijlstra 
1670cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
16716688cc05SPeter Zijlstra extern void task_numa_fault(int last_node, int node, int pages, int flags);
1672e29cf08bSMel Gorman extern pid_t task_numa_group_id(struct task_struct *p);
16731a687c2eSMel Gorman extern void set_numabalancing_state(bool enabled);
167482727018SRik van Riel extern void task_numa_free(struct task_struct *p);
167510f39042SRik van Riel extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
167610f39042SRik van Riel 					int src_nid, int dst_cpu);
1677cbee9f88SPeter Zijlstra #else
1678ac8e895bSMel Gorman static inline void task_numa_fault(int last_node, int node, int pages,
16796688cc05SPeter Zijlstra 				   int flags)
1680cbee9f88SPeter Zijlstra {
1681cbee9f88SPeter Zijlstra }
1682e29cf08bSMel Gorman static inline pid_t task_numa_group_id(struct task_struct *p)
1683e29cf08bSMel Gorman {
1684e29cf08bSMel Gorman 	return 0;
1685e29cf08bSMel Gorman }
16861a687c2eSMel Gorman static inline void set_numabalancing_state(bool enabled)
16871a687c2eSMel Gorman {
16881a687c2eSMel Gorman }
168982727018SRik van Riel static inline void task_numa_free(struct task_struct *p)
169082727018SRik van Riel {
169182727018SRik van Riel }
169210f39042SRik van Riel static inline bool should_numa_migrate_memory(struct task_struct *p,
169310f39042SRik van Riel 				struct page *page, int src_nid, int dst_cpu)
169410f39042SRik van Riel {
169510f39042SRik van Riel 	return true;
169610f39042SRik van Riel }
1697cbee9f88SPeter Zijlstra #endif
1698cbee9f88SPeter Zijlstra 
1699e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
170022c935f4SEric W. Biederman {
170122c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
170222c935f4SEric W. Biederman }
170322c935f4SEric W. Biederman 
1704e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
170522c935f4SEric W. Biederman {
170622c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
170722c935f4SEric W. Biederman }
170822c935f4SEric W. Biederman 
17096dda81f4SOleg Nesterov /*
17106dda81f4SOleg Nesterov  * Without tasklist or rcu lock it is not safe to dereference
17116dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current,
17126dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
17136dda81f4SOleg Nesterov  */
1714e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
171522c935f4SEric W. Biederman {
171622c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
171722c935f4SEric W. Biederman }
171822c935f4SEric W. Biederman 
1719e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
172022c935f4SEric W. Biederman {
172122c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
172222c935f4SEric W. Biederman }
172322c935f4SEric W. Biederman 
17247af57294SPavel Emelyanov struct pid_namespace;
17257af57294SPavel Emelyanov 
17267af57294SPavel Emelyanov /*
17277af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
17287af57294SPavel Emelyanov  * from various namespaces
17297af57294SPavel Emelyanov  *
17307af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
173144c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
173244c4e1b2SEric W. Biederman  *                     current.
17337af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
17347af57294SPavel Emelyanov  *
17357af57294SPavel Emelyanov  * set_task_vxid()   : assigns a virtual id to a task;
17367af57294SPavel Emelyanov  *
17377af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
17387af57294SPavel Emelyanov  */
173952ee2dfdSOleg Nesterov pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
174052ee2dfdSOleg Nesterov 			struct pid_namespace *ns);
17417af57294SPavel Emelyanov 
1742e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
17437af57294SPavel Emelyanov {
17447af57294SPavel Emelyanov 	return tsk->pid;
17457af57294SPavel Emelyanov }
17467af57294SPavel Emelyanov 
174752ee2dfdSOleg Nesterov static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
174852ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
174952ee2dfdSOleg Nesterov {
175052ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
175152ee2dfdSOleg Nesterov }
17527af57294SPavel Emelyanov 
17537af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
17547af57294SPavel Emelyanov {
175552ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
17567af57294SPavel Emelyanov }
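/*
 * Usage sketch (editor's addition): for a task inside a pid namespace
 * the global and virtual ids differ.  A hypothetical trace line:
 *
 *	pr_info("pid %d (global), %d (seen from current's pid ns)\n",
 *		task_pid_nr(tsk), task_pid_vnr(tsk));
 *
 * Per the helper overview above, the _vnr() value is relative to the
 * pid namespace of current.
 */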
17577af57294SPavel Emelyanov 
17587af57294SPavel Emelyanov 
1759e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
17607af57294SPavel Emelyanov {
17617af57294SPavel Emelyanov 	return tsk->tgid;
17627af57294SPavel Emelyanov }
17637af57294SPavel Emelyanov 
17642f2a3a46SPavel Emelyanov pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
17657af57294SPavel Emelyanov 
17667af57294SPavel Emelyanov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
17677af57294SPavel Emelyanov {
17687af57294SPavel Emelyanov 	return pid_vnr(task_tgid(tsk));
17697af57294SPavel Emelyanov }
17707af57294SPavel Emelyanov 
17717af57294SPavel Emelyanov 
177280e0b6e8SRichard Guy Briggs static inline int pid_alive(const struct task_struct *p);
1773ad36d282SRichard Guy Briggs static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1774ad36d282SRichard Guy Briggs {
1775ad36d282SRichard Guy Briggs 	pid_t pid = 0;
1776ad36d282SRichard Guy Briggs 
1777ad36d282SRichard Guy Briggs 	rcu_read_lock();
1778ad36d282SRichard Guy Briggs 	if (pid_alive(tsk))
1779ad36d282SRichard Guy Briggs 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1780ad36d282SRichard Guy Briggs 	rcu_read_unlock();
1781ad36d282SRichard Guy Briggs 
1782ad36d282SRichard Guy Briggs 	return pid;
1783ad36d282SRichard Guy Briggs }
1784ad36d282SRichard Guy Briggs 
1785ad36d282SRichard Guy Briggs static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1786ad36d282SRichard Guy Briggs {
1787ad36d282SRichard Guy Briggs 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1788ad36d282SRichard Guy Briggs }
1789ad36d282SRichard Guy Briggs 
179052ee2dfdSOleg Nesterov static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
179152ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
17927af57294SPavel Emelyanov {
179352ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
17947af57294SPavel Emelyanov }
17957af57294SPavel Emelyanov 
17967af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
17977af57294SPavel Emelyanov {
179852ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
17997af57294SPavel Emelyanov }
18007af57294SPavel Emelyanov 
18017af57294SPavel Emelyanov 
180252ee2dfdSOleg Nesterov static inline pid_t task_session_nr_ns(struct task_struct *tsk,
180352ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
18047af57294SPavel Emelyanov {
180552ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
18067af57294SPavel Emelyanov }
18077af57294SPavel Emelyanov 
18087af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
18097af57294SPavel Emelyanov {
181052ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
18117af57294SPavel Emelyanov }
18127af57294SPavel Emelyanov 
18131b0f7ffdSOleg Nesterov /* obsolete, do not use */
18141b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
18151b0f7ffdSOleg Nesterov {
18161b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
18171b0f7ffdSOleg Nesterov }
18187af57294SPavel Emelyanov 
18191da177e4SLinus Torvalds /**
18201da177e4SLinus Torvalds  * pid_alive - check that a task structure is not stale
18211da177e4SLinus Torvalds  * @p: Task structure to be checked.
18221da177e4SLinus Torvalds  *
18231da177e4SLinus Torvalds  * Test if a process is not yet dead (at most zombie state).
18241da177e4SLinus Torvalds  * If pid_alive fails, then pointers within the task structure
18251da177e4SLinus Torvalds  * can be stale and must not be dereferenced.
1826e69f6186SYacine Belkadi  *
1827e69f6186SYacine Belkadi  * Return: 1 if the process is alive. 0 otherwise.
18281da177e4SLinus Torvalds  */
1829ad36d282SRichard Guy Briggs static inline int pid_alive(const struct task_struct *p)
18301da177e4SLinus Torvalds {
183192476d7fSEric W. Biederman 	return p->pids[PIDTYPE_PID].pid != NULL;
18321da177e4SLinus Torvalds }
18331da177e4SLinus Torvalds 
1834f400e198SSukadev Bhattiprolu /**
1835b460cbc5SSerge E. Hallyn  * is_global_init - check if a task structure is init
18363260259fSHenne  * @tsk: Task structure to be checked.
18373260259fSHenne  *
18383260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1839e69f6186SYacine Belkadi  *
1840e69f6186SYacine Belkadi  * Return: 1 if the task structure is init. 0 otherwise.
1841f400e198SSukadev Bhattiprolu  */
1842e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1843b461cc03SPavel Emelyanov {
1844b461cc03SPavel Emelyanov 	return tsk->pid == 1;
1845b461cc03SPavel Emelyanov }
1846b460cbc5SSerge E. Hallyn 
18479ec52099SCedric Le Goater extern struct pid *cad_pid;
18489ec52099SCedric Le Goater 
18491da177e4SLinus Torvalds extern void free_task(struct task_struct *tsk);
18501da177e4SLinus Torvalds #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1851e56d0903SIngo Molnar 
1852158d9ebdSAndrew Morton extern void __put_task_struct(struct task_struct *t);
1853e56d0903SIngo Molnar 
1854e56d0903SIngo Molnar static inline void put_task_struct(struct task_struct *t)
1855e56d0903SIngo Molnar {
1856e56d0903SIngo Molnar 	if (atomic_dec_and_test(&t->usage))
18578c7904a0SEric W. Biederman 		__put_task_struct(t);
1858e56d0903SIngo Molnar }
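/*
 * Usage sketch (editor's addition): the usual reference pattern when a
 * task pointer must outlive the locked section in which it was found:
 *
 *	rcu_read_lock();
 *	tsk = find_task_by_vpid(pid);
 *	if (tsk)
 *		get_task_struct(tsk);
 *	rcu_read_unlock();
 *
 * ... use tsk safely, then drop the reference, which may free it:
 *
 *	put_task_struct(tsk);
 */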
18591da177e4SLinus Torvalds 
18606a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
18616a61671bSFrederic Weisbecker extern void task_cputime(struct task_struct *t,
18626a61671bSFrederic Weisbecker 			 cputime_t *utime, cputime_t *stime);
18636a61671bSFrederic Weisbecker extern void task_cputime_scaled(struct task_struct *t,
18646a61671bSFrederic Weisbecker 				cputime_t *utimescaled, cputime_t *stimescaled);
18656a61671bSFrederic Weisbecker extern cputime_t task_gtime(struct task_struct *t);
18666a61671bSFrederic Weisbecker #else
18676fac4829SFrederic Weisbecker static inline void task_cputime(struct task_struct *t,
18686fac4829SFrederic Weisbecker 				cputime_t *utime, cputime_t *stime)
18696fac4829SFrederic Weisbecker {
18706fac4829SFrederic Weisbecker 	if (utime)
18716fac4829SFrederic Weisbecker 		*utime = t->utime;
18726fac4829SFrederic Weisbecker 	if (stime)
18736fac4829SFrederic Weisbecker 		*stime = t->stime;
18746fac4829SFrederic Weisbecker }
18756fac4829SFrederic Weisbecker 
18766fac4829SFrederic Weisbecker static inline void task_cputime_scaled(struct task_struct *t,
18776fac4829SFrederic Weisbecker 				       cputime_t *utimescaled,
18786fac4829SFrederic Weisbecker 				       cputime_t *stimescaled)
18796fac4829SFrederic Weisbecker {
18806fac4829SFrederic Weisbecker 	if (utimescaled)
18816fac4829SFrederic Weisbecker 		*utimescaled = t->utimescaled;
18826fac4829SFrederic Weisbecker 	if (stimescaled)
18836fac4829SFrederic Weisbecker 		*stimescaled = t->stimescaled;
18846fac4829SFrederic Weisbecker }
18856a61671bSFrederic Weisbecker 
18866a61671bSFrederic Weisbecker static inline cputime_t task_gtime(struct task_struct *t)
18876a61671bSFrederic Weisbecker {
18886a61671bSFrederic Weisbecker 	return t->gtime;
18896a61671bSFrederic Weisbecker }
18906a61671bSFrederic Weisbecker #endif
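/*
 * Usage sketch (editor's addition): both configurations read the same
 * from the caller's side:
 *
 *	cputime_t utime, stime;
 *
 *	task_cputime(current, &utime, &stime);
 *
 * Under CONFIG_VIRT_CPU_ACCOUNTING_GEN the out-of-line version samples
 * the vtime seqlock; otherwise the stored fields are copied directly,
 * as the inline stubs above show.
 */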
1891e80d0a1aSFrederic Weisbecker extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1892e80d0a1aSFrederic Weisbecker extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
189349048622SBalbir Singh 
18941da177e4SLinus Torvalds /*
18951da177e4SLinus Torvalds  * Per process flags
18961da177e4SLinus Torvalds  */
18971da177e4SLinus Torvalds #define PF_EXITING	0x00000004	/* getting shut down */
1898778e9a9cSAlexey Kuznetsov #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
189994886b84SLaurent Vivier #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
190021aa9af0STejun Heo #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
19011da177e4SLinus Torvalds #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
19024db96cf0SAndi Kleen #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
19031da177e4SLinus Torvalds #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
19041da177e4SLinus Torvalds #define PF_DUMPCORE	0x00000200	/* dumped core */
19051da177e4SLinus Torvalds #define PF_SIGNALED	0x00000400	/* killed by a signal */
19061da177e4SLinus Torvalds #define PF_MEMALLOC	0x00000800	/* Allocating memory */
190772fa5997SVasiliy Kulikov #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
19081da177e4SLinus Torvalds #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1909774a1221STejun Heo #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
19101da177e4SLinus Torvalds #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
19111da177e4SLinus Torvalds #define PF_FROZEN	0x00010000	/* frozen for system suspend */
19121da177e4SLinus Torvalds #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
19131da177e4SLinus Torvalds #define PF_KSWAPD	0x00040000	/* I am kswapd */
191421caf2fcSMing Lei #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
19151da177e4SLinus Torvalds #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1916246bb0b1SOleg Nesterov #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1917b31dc66aSJens Axboe #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1918b31dc66aSJens Axboe #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1919b31dc66aSJens Axboe #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1920b31dc66aSJens Axboe #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
192114a40ffcSTejun Heo #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
19224db96cf0SAndi Kleen #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
192361a87122SThomas Gleixner #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
192458a69cb4STejun Heo #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
19252b44c4dbSColin Cross #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
19261da177e4SLinus Torvalds 
19271da177e4SLinus Torvalds /*
19281da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
19291da177e4SLinus Torvalds  * tasks can access tsk->flags in readonly mode for example
19301da177e4SLinus Torvalds  * with tsk_used_math (like during threaded core dumping).
19311da177e4SLinus Torvalds  * There is however an exception to this rule during ptrace
19321da177e4SLinus Torvalds  * or during fork: the ptracer task is allowed to write to the
19331da177e4SLinus Torvalds  * child->flags of its traced child (same goes for fork, the parent
19341da177e4SLinus Torvalds  * can write to the child->flags), because we're guaranteed the
19351da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
19361da177e4SLinus Torvalds  * at the same time the parent does it.
19371da177e4SLinus Torvalds  */
19381da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
19391da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
19401da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
19411da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
19421da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
19431da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
19441da177e4SLinus Torvalds #define conditional_used_math(condition) \
19451da177e4SLinus Torvalds 	conditional_stopped_child_used_math(condition, current)
19461da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
19471da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
19481da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
19491da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
19501da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
19511da177e4SLinus Torvalds 
195221caf2fcSMing Lei /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
195321caf2fcSMing Lei static inline gfp_t memalloc_noio_flags(gfp_t flags)
195421caf2fcSMing Lei {
195521caf2fcSMing Lei 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
195621caf2fcSMing Lei 		flags &= ~__GFP_IO;
195721caf2fcSMing Lei 	return flags;
195821caf2fcSMing Lei }
195921caf2fcSMing Lei 
196021caf2fcSMing Lei static inline unsigned int memalloc_noio_save(void)
196121caf2fcSMing Lei {
196221caf2fcSMing Lei 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
196321caf2fcSMing Lei 	current->flags |= PF_MEMALLOC_NOIO;
196421caf2fcSMing Lei 	return flags;
196521caf2fcSMing Lei }
196621caf2fcSMing Lei 
196721caf2fcSMing Lei static inline void memalloc_noio_restore(unsigned int flags)
196821caf2fcSMing Lei {
196921caf2fcSMing Lei 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
197021caf2fcSMing Lei }
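/*
 * Usage sketch (editor's addition): the save/restore pair brackets a
 * region whose allocations must not recurse into I/O (e.g. a block
 * driver resume path):
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	... allocations here get __GFP_IO stripped via
 *	memalloc_noio_flags() ...
 *
 *	memalloc_noio_restore(noio_flags);
 *
 * The pair nests correctly because save() returns the previous state.
 */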
197121caf2fcSMing Lei 
19721d4457f9SKees Cook /* Per-process atomic flags. */
19731d4457f9SKees Cook #define PFA_NO_NEW_PRIVS 0x00000001	/* May not gain new privileges. */
19741d4457f9SKees Cook 
19751d4457f9SKees Cook static inline bool task_no_new_privs(struct task_struct *p)
19761d4457f9SKees Cook {
19771d4457f9SKees Cook 	return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
19781d4457f9SKees Cook }
19791d4457f9SKees Cook 
19801d4457f9SKees Cook static inline void task_set_no_new_privs(struct task_struct *p)
19811d4457f9SKees Cook {
19821d4457f9SKees Cook 	set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
19831d4457f9SKees Cook }
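/*
 * Usage sketch (editor's addition): this bit backs
 * prctl(PR_SET_NO_NEW_PRIVS).  It is one-way by design; note there is
 * deliberately no clear helper:
 *
 *	task_set_no_new_privs(current);
 *	WARN_ON(!task_no_new_privs(current));
 */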
19841d4457f9SKees Cook 
1985e5c1902eSTejun Heo /*
1986a8f072c1STejun Heo  * task->jobctl flags
1987e5c1902eSTejun Heo  */
1988a8f072c1STejun Heo #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1989e5c1902eSTejun Heo 
1990a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1991a8f072c1STejun Heo #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1992a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
199373ddff2bSTejun Heo #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1994fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1995a8f072c1STejun Heo #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1996544b2c91STejun Heo #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1997a8f072c1STejun Heo 
1998a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1999a8f072c1STejun Heo #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
2000a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
200173ddff2bSTejun Heo #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
2002fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
2003a8f072c1STejun Heo #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
2004544b2c91STejun Heo #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
2005a8f072c1STejun Heo 
2006fb1d910cSTejun Heo #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
200773ddff2bSTejun Heo #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
20083759a0d9STejun Heo 
20097dd3db54STejun Heo extern bool task_set_jobctl_pending(struct task_struct *task,
20107dd3db54STejun Heo 				    unsigned int mask);
201173ddff2bSTejun Heo extern void task_clear_jobctl_trapping(struct task_struct *task);
20123759a0d9STejun Heo extern void task_clear_jobctl_pending(struct task_struct *task,
20133759a0d9STejun Heo 				      unsigned int mask);
201439efa3efSTejun Heo 
2015f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
2016f41d911fSPaul E. McKenney {
20178315f422SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
2018f41d911fSPaul E. McKenney 	p->rcu_read_lock_nesting = 0;
2019*1d082fd0SPaul E. McKenney 	p->rcu_read_unlock_special.s = 0;
2020dd5d19baSPaul E. McKenney 	p->rcu_blocked_node = NULL;
2021f41d911fSPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_node_entry);
20228315f422SPaul E. McKenney #endif /* #ifdef CONFIG_PREEMPT_RCU */
20238315f422SPaul E. McKenney #ifdef CONFIG_TASKS_RCU
20248315f422SPaul E. McKenney 	p->rcu_tasks_holdout = false;
20258315f422SPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2026176f8f7aSPaul E. McKenney 	p->rcu_tasks_idle_cpu = -1;
20278315f422SPaul E. McKenney #endif /* #ifdef CONFIG_TASKS_RCU */
2028f41d911fSPaul E. McKenney }
2029f41d911fSPaul E. McKenney 
2030907aed48SMel Gorman static inline void tsk_restore_flags(struct task_struct *task,
2031907aed48SMel Gorman 				unsigned long orig_flags, unsigned long flags)
2032907aed48SMel Gorman {
2033907aed48SMel Gorman 	task->flags &= ~flags;
2034907aed48SMel Gorman 	task->flags |= orig_flags & flags;
2035907aed48SMel Gorman }
2036907aed48SMel Gorman 
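/*
 * Editorial sketch: the save/modify/restore pattern tsk_restore_flags() is
 * built for, here with PF_MEMALLOC as the flag being toggled (the same
 * shape the networking swap-over-memory code uses).
 */
static inline void example_memalloc_section(void)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;
	/* ... allocation-critical section ... */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
}
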
20371da177e4SLinus Torvalds #ifdef CONFIG_SMP
20381e1b6c51SKOSAKI Motohiro extern void do_set_cpus_allowed(struct task_struct *p,
20391e1b6c51SKOSAKI Motohiro 			       const struct cpumask *new_mask);
20401e1b6c51SKOSAKI Motohiro 
2041cd8ba7cdSMike Travis extern int set_cpus_allowed_ptr(struct task_struct *p,
204296f874e2SRusty Russell 				const struct cpumask *new_mask);
20431da177e4SLinus Torvalds #else
20441e1b6c51SKOSAKI Motohiro static inline void do_set_cpus_allowed(struct task_struct *p,
20451e1b6c51SKOSAKI Motohiro 				      const struct cpumask *new_mask)
20461e1b6c51SKOSAKI Motohiro {
20471e1b6c51SKOSAKI Motohiro }
2048cd8ba7cdSMike Travis static inline int set_cpus_allowed_ptr(struct task_struct *p,
204996f874e2SRusty Russell 				       const struct cpumask *new_mask)
20501da177e4SLinus Torvalds {
205196f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
20521da177e4SLinus Torvalds 		return -EINVAL;
20531da177e4SLinus Torvalds 	return 0;
20541da177e4SLinus Torvalds }
20551da177e4SLinus Torvalds #endif
2056e0ad9556SRusty Russell 
20573451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
20585167e8d5SPeter Zijlstra void calc_load_enter_idle(void);
20595167e8d5SPeter Zijlstra void calc_load_exit_idle(void);
20605167e8d5SPeter Zijlstra #else
20615167e8d5SPeter Zijlstra static inline void calc_load_enter_idle(void) { }
20625167e8d5SPeter Zijlstra static inline void calc_load_exit_idle(void) { }
20633451d024SFrederic Weisbecker #endif /* CONFIG_NO_HZ_COMMON */
20645167e8d5SPeter Zijlstra 
2065e0ad9556SRusty Russell #ifndef CONFIG_CPUMASK_OFFSTACK
2066cd8ba7cdSMike Travis static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2067cd8ba7cdSMike Travis {
2068cd8ba7cdSMike Travis 	return set_cpus_allowed_ptr(p, &new_mask);
2069cd8ba7cdSMike Travis }
2070e0ad9556SRusty Russell #endif
20711da177e4SLinus Torvalds 
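/*
 * Editorial sketch: pinning a task to a single CPU with the interface
 * above.  cpumask_of() yields a mask containing just that CPU; on !SMP
 * kernels the stub accepts any mask containing CPU 0.
 */
static inline int example_pin_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}
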
2072b342501cSIngo Molnar /*
2073c676329aSPeter Zijlstra  * Do not use outside of architecture code which knows its limitations.
2074c676329aSPeter Zijlstra  *
2075c676329aSPeter Zijlstra  * sched_clock() has no promise of monotonicity or bounded drift between
2076c676329aSPeter Zijlstra  * CPUs; using it directly (which you should not do) requires disabling IRQs.
2077c676329aSPeter Zijlstra  *
2078c676329aSPeter Zijlstra  * Please use one of the three interfaces below.
2079b342501cSIngo Molnar  */
20801bbfa6f2SMike Frysinger extern unsigned long long notrace sched_clock(void);
2081c676329aSPeter Zijlstra /*
2082489a71b0SHiroshi Shimamoto  * See the comment in kernel/sched/clock.c
2083c676329aSPeter Zijlstra  */
2084c676329aSPeter Zijlstra extern u64 cpu_clock(int cpu);
2085c676329aSPeter Zijlstra extern u64 local_clock(void);
2086c676329aSPeter Zijlstra extern u64 sched_clock_cpu(int cpu);
2087c676329aSPeter Zijlstra 
2088e436d800SIngo Molnar 
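/*
 * Editorial sketch: timing a short section with local_clock(), which is
 * meaningful within one CPU (or with preemption disabled) and is the usual
 * choice for tracing-style timestamps.  The function name is hypothetical.
 */
static inline u64 example_time_section(void (*fn)(void))
{
	u64 t0 = local_clock();

	fn();
	return local_clock() - t0;	/* elapsed time in nanoseconds */
}
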
2089c1955a3dSPeter Zijlstra extern void sched_clock_init(void);
2090c1955a3dSPeter Zijlstra 
20913e51f33fSPeter Zijlstra #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
20923e51f33fSPeter Zijlstra static inline void sched_clock_tick(void)
20933e51f33fSPeter Zijlstra {
20943e51f33fSPeter Zijlstra }
20953e51f33fSPeter Zijlstra 
20963e51f33fSPeter Zijlstra static inline void sched_clock_idle_sleep_event(void)
20973e51f33fSPeter Zijlstra {
20983e51f33fSPeter Zijlstra }
20993e51f33fSPeter Zijlstra 
21003e51f33fSPeter Zijlstra static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
21013e51f33fSPeter Zijlstra {
21023e51f33fSPeter Zijlstra }
21033e51f33fSPeter Zijlstra #else
2104c676329aSPeter Zijlstra /*
2105c676329aSPeter Zijlstra  * Architectures that select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their
2106c676329aSPeter Zijlstra  * arch Kconfig can still mark the clock stable with
2107c676329aSPeter Zijlstra  * set_sched_clock_stable() when, during bootup, sched_clock()
2108c676329aSPeter Zijlstra  * turns out to be reliable after all:
2109c676329aSPeter Zijlstra  */
211035af99e6SPeter Zijlstra extern int sched_clock_stable(void);
211135af99e6SPeter Zijlstra extern void set_sched_clock_stable(void);
211235af99e6SPeter Zijlstra extern void clear_sched_clock_stable(void);
2113c676329aSPeter Zijlstra 
21143e51f33fSPeter Zijlstra extern void sched_clock_tick(void);
21153e51f33fSPeter Zijlstra extern void sched_clock_idle_sleep_event(void);
21163e51f33fSPeter Zijlstra extern void sched_clock_idle_wakeup_event(u64 delta_ns);
21173e51f33fSPeter Zijlstra #endif
21183e51f33fSPeter Zijlstra 
2119b52bfee4SVenkatesh Pallipadi #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2120b52bfee4SVenkatesh Pallipadi /*
2121b52bfee4SVenkatesh Pallipadi  * An interface for runtime opt-in to IRQ time accounting, based on sched_clock.
2122b52bfee4SVenkatesh Pallipadi  * The reason for this explicit opt-in is to avoid a performance penalty with
2123b52bfee4SVenkatesh Pallipadi  * slow sched_clocks.
2124b52bfee4SVenkatesh Pallipadi  */
2125b52bfee4SVenkatesh Pallipadi extern void enable_sched_clock_irqtime(void);
2126b52bfee4SVenkatesh Pallipadi extern void disable_sched_clock_irqtime(void);
2127b52bfee4SVenkatesh Pallipadi #else
2128b52bfee4SVenkatesh Pallipadi static inline void enable_sched_clock_irqtime(void) {}
2129b52bfee4SVenkatesh Pallipadi static inline void disable_sched_clock_irqtime(void) {}
2130b52bfee4SVenkatesh Pallipadi #endif
2131b52bfee4SVenkatesh Pallipadi 
213236c8b586SIngo Molnar extern unsigned long long
213341b86e9cSIngo Molnar task_sched_runtime(struct task_struct *task);
21341da177e4SLinus Torvalds 
21351da177e4SLinus Torvalds /* sched_exec is called by processes performing an exec */
21361da177e4SLinus Torvalds #ifdef CONFIG_SMP
21371da177e4SLinus Torvalds extern void sched_exec(void);
21381da177e4SLinus Torvalds #else
21391da177e4SLinus Torvalds #define sched_exec()   {}
21401da177e4SLinus Torvalds #endif
21411da177e4SLinus Torvalds 
21422aa44d05SIngo Molnar extern void sched_clock_idle_sleep_event(void);
21432aa44d05SIngo Molnar extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2144bb29ab26SIngo Molnar 
21451da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
21461da177e4SLinus Torvalds extern void idle_task_exit(void);
21471da177e4SLinus Torvalds #else
21481da177e4SLinus Torvalds static inline void idle_task_exit(void) {}
21491da177e4SLinus Torvalds #endif
21501da177e4SLinus Torvalds 
21513451d024SFrederic Weisbecker #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
21521c20091eSFrederic Weisbecker extern void wake_up_nohz_cpu(int cpu);
215306d8308cSThomas Gleixner #else
21541c20091eSFrederic Weisbecker static inline void wake_up_nohz_cpu(int cpu) { }
215506d8308cSThomas Gleixner #endif
215606d8308cSThomas Gleixner 
2157ce831b38SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
2158ce831b38SFrederic Weisbecker extern bool sched_can_stop_tick(void);
2159265f22a9SFrederic Weisbecker extern u64 scheduler_tick_max_deferment(void);
2160ce831b38SFrederic Weisbecker #else
2161ce831b38SFrederic Weisbecker static inline bool sched_can_stop_tick(void) { return false; }
2162bf0f6f24SIngo Molnar #endif
2163bf0f6f24SIngo Molnar 
21645091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
21655091faa4SMike Galbraith extern void sched_autogroup_create_attach(struct task_struct *p);
21665091faa4SMike Galbraith extern void sched_autogroup_detach(struct task_struct *p);
21675091faa4SMike Galbraith extern void sched_autogroup_fork(struct signal_struct *sig);
21685091faa4SMike Galbraith extern void sched_autogroup_exit(struct signal_struct *sig);
21695091faa4SMike Galbraith #ifdef CONFIG_PROC_FS
21705091faa4SMike Galbraith extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
21712e5b5b3aSHiroshi Shimamoto extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
21725091faa4SMike Galbraith #endif
21735091faa4SMike Galbraith #else
21745091faa4SMike Galbraith static inline void sched_autogroup_create_attach(struct task_struct *p) { }
21755091faa4SMike Galbraith static inline void sched_autogroup_detach(struct task_struct *p) { }
21765091faa4SMike Galbraith static inline void sched_autogroup_fork(struct signal_struct *sig) { }
21775091faa4SMike Galbraith static inline void sched_autogroup_exit(struct signal_struct *sig) { }
21785091faa4SMike Galbraith #endif
21795091faa4SMike Galbraith 
2180fa93384fSDan Carpenter extern int yield_to(struct task_struct *p, bool preempt);
218136c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
218236c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
2183d0ea0268SDongsheng Yang /**
2184d0ea0268SDongsheng Yang  * task_nice - return the nice value of a given task.
2185d0ea0268SDongsheng Yang  * @p: the task in question.
2186d0ea0268SDongsheng Yang  *
2187d0ea0268SDongsheng Yang  * Return: The nice value [ -20 ... 0 ... 19 ].
2188d0ea0268SDongsheng Yang  */
2189d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p)
2190d0ea0268SDongsheng Yang {
2191d0ea0268SDongsheng Yang 	return PRIO_TO_NICE((p)->static_prio);
2192d0ea0268SDongsheng Yang }
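/*
 * Editorial sketch: reading and adjusting a task's nice value with
 * task_nice() above and set_user_nice().  set_user_nice() ignores values
 * outside the valid [-20, 19] range, so no clamping is needed here.
 */
static inline void example_bump_nice(struct task_struct *p)
{
	set_user_nice(p, task_nice(p) + 1);
}
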
219336c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
219436c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
21951da177e4SLinus Torvalds extern int idle_cpu(int cpu);
2196fe7de49fSKOSAKI Motohiro extern int sched_setscheduler(struct task_struct *, int,
2197fe7de49fSKOSAKI Motohiro 			      const struct sched_param *);
2198961ccdddSRusty Russell extern int sched_setscheduler_nocheck(struct task_struct *, int,
2199fe7de49fSKOSAKI Motohiro 				      const struct sched_param *);
2200d50dde5aSDario Faggioli extern int sched_setattr(struct task_struct *,
2201d50dde5aSDario Faggioli 			 const struct sched_attr *);
220236c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
2203c4f30608SPaul E. McKenney /**
2204c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
2205fa757281SRandy Dunlap  * @p: the task in question.
2206e69f6186SYacine Belkadi  *
2207e69f6186SYacine Belkadi  * Return: true if @p is an idle task, false otherwise.
2208c4f30608SPaul E. McKenney  */
22097061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
2210c4f30608SPaul E. McKenney {
2211c4f30608SPaul E. McKenney 	return p->pid == 0;
2212c4f30608SPaul E. McKenney }
221336c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
221436c8b586SIngo Molnar extern void set_curr_task(int cpu, struct task_struct *p);
22151da177e4SLinus Torvalds 
22161da177e4SLinus Torvalds void yield(void);
22171da177e4SLinus Torvalds 
22181da177e4SLinus Torvalds /*
22191da177e4SLinus Torvalds  * The default (Linux) execution domain.
22201da177e4SLinus Torvalds  */
22211da177e4SLinus Torvalds extern struct exec_domain	default_exec_domain;
22221da177e4SLinus Torvalds 
22231da177e4SLinus Torvalds union thread_union {
22241da177e4SLinus Torvalds 	struct thread_info thread_info;
22251da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
22261da177e4SLinus Torvalds };
22271da177e4SLinus Torvalds 
22281da177e4SLinus Torvalds #ifndef __HAVE_ARCH_KSTACK_END
22291da177e4SLinus Torvalds static inline int kstack_end(void *addr)
22301da177e4SLinus Torvalds {
22311da177e4SLinus Torvalds 	/* Reliable end of stack detection:
22321da177e4SLinus Torvalds 	 * Some APM bios versions misalign the stack
22331da177e4SLinus Torvalds 	 * Some APM BIOS versions misalign the stack
22341da177e4SLinus Torvalds 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
22351da177e4SLinus Torvalds }
22361da177e4SLinus Torvalds #endif
22371da177e4SLinus Torvalds 
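/*
 * Editorial sketch: scanning the raw stack words of a task, the way a
 * backtrace-style walk would use kstack_end().  'sp' is a hypothetical
 * address inside the stack; the helper reports when the scan reaches
 * the end of the stack area.
 */
static inline void example_scan_stack(unsigned long *sp)
{
	while (!kstack_end(sp)) {
		/* inspect *sp, e.g. test it as a possible text address */
		sp++;
	}
}
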
22381da177e4SLinus Torvalds extern union thread_union init_thread_union;
22391da177e4SLinus Torvalds extern struct task_struct init_task;
22401da177e4SLinus Torvalds 
22411da177e4SLinus Torvalds extern struct   mm_struct init_mm;
22421da177e4SLinus Torvalds 
2243198fe21bSPavel Emelyanov extern struct pid_namespace init_pid_ns;
2244198fe21bSPavel Emelyanov 
2245198fe21bSPavel Emelyanov /*
2246198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
2247198fe21bSPavel Emelyanov  *
2248198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
2249198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
2250228ebcbeSPavel Emelyanov  * find_task_by_vpid():
2251228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
2252198fe21bSPavel Emelyanov  *
2253e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
2254198fe21bSPavel Emelyanov  */
2255198fe21bSPavel Emelyanov 
2256228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
2257228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2258228ebcbeSPavel Emelyanov 		struct pid_namespace *ns);
2259198fe21bSPavel Emelyanov 
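/*
 * Editorial sketch: the usual lookup pattern.  These helpers return the
 * task without taking a reference, so callers hold rcu_read_lock() across
 * the lookup and pin the result with get_task_struct() before using it
 * outside the RCU section.  The wrapper name is hypothetical.
 */
static inline struct task_struct *example_get_task_by_vpid(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);
	rcu_read_unlock();
	return p;	/* caller does put_task_struct() when done */
}
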
22601da177e4SLinus Torvalds /* per-UID process charging. */
22617b44ab97SEric W. Biederman extern struct user_struct * alloc_uid(kuid_t);
22621da177e4SLinus Torvalds static inline struct user_struct *get_uid(struct user_struct *u)
22631da177e4SLinus Torvalds {
22641da177e4SLinus Torvalds 	atomic_inc(&u->__count);
22651da177e4SLinus Torvalds 	return u;
22661da177e4SLinus Torvalds }
22671da177e4SLinus Torvalds extern void free_uid(struct user_struct *);
22681da177e4SLinus Torvalds 
22691da177e4SLinus Torvalds #include <asm/current.h>
22701da177e4SLinus Torvalds 
2271f0af911aSTorben Hohn extern void xtime_update(unsigned long ticks);
22721da177e4SLinus Torvalds 
2273b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2274b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
22753e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
22761da177e4SLinus Torvalds #ifdef CONFIG_SMP
22771da177e4SLinus Torvalds  extern void kick_process(struct task_struct *tsk);
22781da177e4SLinus Torvalds #else
22791da177e4SLinus Torvalds  static inline void kick_process(struct task_struct *tsk) { }
22801da177e4SLinus Torvalds #endif
2281aab03e05SDario Faggioli extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2282ad46c2c4SIngo Molnar extern void sched_dead(struct task_struct *p);
22831da177e4SLinus Torvalds 
22841da177e4SLinus Torvalds extern void proc_caches_init(void);
22851da177e4SLinus Torvalds extern void flush_signals(struct task_struct *);
22863bcac026SDavid Howells extern void __flush_signals(struct task_struct *);
228710ab825bSOleg Nesterov extern void ignore_signals(struct task_struct *);
22881da177e4SLinus Torvalds extern void flush_signal_handlers(struct task_struct *, int force_default);
22891da177e4SLinus Torvalds extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
22901da177e4SLinus Torvalds 
22911da177e4SLinus Torvalds static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
22921da177e4SLinus Torvalds {
22931da177e4SLinus Torvalds 	unsigned long flags;
22941da177e4SLinus Torvalds 	int ret;
22951da177e4SLinus Torvalds 
22961da177e4SLinus Torvalds 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
22971da177e4SLinus Torvalds 	ret = dequeue_signal(tsk, mask, info);
22981da177e4SLinus Torvalds 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
22991da177e4SLinus Torvalds 
23001da177e4SLinus Torvalds 	return ret;
23011da177e4SLinus Torvalds }
23021da177e4SLinus Torvalds 
23031da177e4SLinus Torvalds extern void block_all_signals(int (*notifier)(void *priv), void *priv,
23041da177e4SLinus Torvalds 			      sigset_t *mask);
23051da177e4SLinus Torvalds extern void unblock_all_signals(void);
23061da177e4SLinus Torvalds extern void release_task(struct task_struct * p);
23071da177e4SLinus Torvalds extern int send_sig_info(int, struct siginfo *, struct task_struct *);
23081da177e4SLinus Torvalds extern int force_sigsegv(int, struct task_struct *);
23091da177e4SLinus Torvalds extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2310c4b92fc1SEric W. Biederman extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2311c4b92fc1SEric W. Biederman extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2312d178bc3aSSerge Hallyn extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2313d178bc3aSSerge Hallyn 				const struct cred *, u32);
2314c4b92fc1SEric W. Biederman extern int kill_pgrp(struct pid *pid, int sig, int priv);
2315c4b92fc1SEric W. Biederman extern int kill_pid(struct pid *pid, int sig, int priv);
2316c3de4b38SMatthew Wilcox extern int kill_proc_info(int, struct siginfo *, pid_t);
231786773473SOleg Nesterov extern __must_check bool do_notify_parent(struct task_struct *, int);
2318a7f0765eSOleg Nesterov extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
23191da177e4SLinus Torvalds extern void force_sig(int, struct task_struct *);
23201da177e4SLinus Torvalds extern int send_sig(int, struct task_struct *, int);
232109faef11SOleg Nesterov extern int zap_other_threads(struct task_struct *p);
23221da177e4SLinus Torvalds extern struct sigqueue *sigqueue_alloc(void);
23231da177e4SLinus Torvalds extern void sigqueue_free(struct sigqueue *);
2324ac5c2153SOleg Nesterov extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
23259ac95f2fSOleg Nesterov extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
23261da177e4SLinus Torvalds 
232751a7b448SAl Viro static inline void restore_saved_sigmask(void)
232851a7b448SAl Viro {
232951a7b448SAl Viro 	if (test_and_clear_restore_sigmask())
233077097ae5SAl Viro 		__set_current_blocked(&current->saved_sigmask);
233151a7b448SAl Viro }
233251a7b448SAl Viro 
2333b7f9a11aSAl Viro static inline sigset_t *sigmask_to_save(void)
2334b7f9a11aSAl Viro {
2335b7f9a11aSAl Viro 	sigset_t *res = &current->blocked;
2336b7f9a11aSAl Viro 	if (unlikely(test_restore_sigmask()))
2337b7f9a11aSAl Viro 		res = &current->saved_sigmask;
2338b7f9a11aSAl Viro 	return res;
2339b7f9a11aSAl Viro }
2340b7f9a11aSAl Viro 
23419ec52099SCedric Le Goater static inline int kill_cad_pid(int sig, int priv)
23429ec52099SCedric Le Goater {
23439ec52099SCedric Le Goater 	return kill_pid(cad_pid, sig, priv);
23449ec52099SCedric Le Goater }
23459ec52099SCedric Le Goater 
23461da177e4SLinus Torvalds /* These can be the second arg to send_sig_info/send_group_sig_info.  */
23471da177e4SLinus Torvalds #define SEND_SIG_NOINFO ((struct siginfo *) 0)
23481da177e4SLinus Torvalds #define SEND_SIG_PRIV	((struct siginfo *) 1)
23491da177e4SLinus Torvalds #define SEND_SIG_FORCED	((struct siginfo *) 2)
23501da177e4SLinus Torvalds 
23512a855dd0SSebastian Andrzej Siewior /*
23522a855dd0SSebastian Andrzej Siewior  * True if we are on the alternate signal stack.
23532a855dd0SSebastian Andrzej Siewior  */
23541da177e4SLinus Torvalds static inline int on_sig_stack(unsigned long sp)
23551da177e4SLinus Torvalds {
23562a855dd0SSebastian Andrzej Siewior #ifdef CONFIG_STACK_GROWSUP
23572a855dd0SSebastian Andrzej Siewior 	return sp >= current->sas_ss_sp &&
23582a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp < current->sas_ss_size;
23592a855dd0SSebastian Andrzej Siewior #else
23602a855dd0SSebastian Andrzej Siewior 	return sp > current->sas_ss_sp &&
23612a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp <= current->sas_ss_size;
23622a855dd0SSebastian Andrzej Siewior #endif
23631da177e4SLinus Torvalds }
23641da177e4SLinus Torvalds 
23651da177e4SLinus Torvalds static inline int sas_ss_flags(unsigned long sp)
23661da177e4SLinus Torvalds {
236772f15c03SRichard Weinberger 	if (!current->sas_ss_size)
236872f15c03SRichard Weinberger 		return SS_DISABLE;
236972f15c03SRichard Weinberger 
237072f15c03SRichard Weinberger 	return on_sig_stack(sp) ? SS_ONSTACK : 0;
23711da177e4SLinus Torvalds }
23721da177e4SLinus Torvalds 
23735a1b98d3SAl Viro static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
23745a1b98d3SAl Viro {
23755a1b98d3SAl Viro 	if (unlikely(ksig->ka.sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
23765a1b98d3SAl Viro #ifdef CONFIG_STACK_GROWSUP
23775a1b98d3SAl Viro 		return current->sas_ss_sp;
23785a1b98d3SAl Viro #else
23795a1b98d3SAl Viro 		return current->sas_ss_sp + current->sas_ss_size;
23805a1b98d3SAl Viro #endif
23815a1b98d3SAl Viro 	return sp;
23825a1b98d3SAl Viro }
23835a1b98d3SAl Viro 
23841da177e4SLinus Torvalds /*
23851da177e4SLinus Torvalds  * Routines for handling mm_structs
23861da177e4SLinus Torvalds  */
23871da177e4SLinus Torvalds extern struct mm_struct * mm_alloc(void);
23881da177e4SLinus Torvalds 
23891da177e4SLinus Torvalds /* mmdrop drops the mm and the page tables */
2390b3c97528SHarvey Harrison extern void __mmdrop(struct mm_struct *);
23911da177e4SLinus Torvalds static inline void mmdrop(struct mm_struct * mm)
23921da177e4SLinus Torvalds {
23936fb43d7bSIngo Molnar 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
23941da177e4SLinus Torvalds 		__mmdrop(mm);
23951da177e4SLinus Torvalds }
23961da177e4SLinus Torvalds 
23971da177e4SLinus Torvalds /* mmput gets rid of the mappings and all user-space */
23981da177e4SLinus Torvalds extern void mmput(struct mm_struct *);
23991da177e4SLinus Torvalds /* Grab a reference to a task's mm, if it is not already going away */
24001da177e4SLinus Torvalds extern struct mm_struct *get_task_mm(struct task_struct *task);
24018cdb878dSChristopher Yeoh /*
24028cdb878dSChristopher Yeoh  * Grab a reference to a task's mm, if it is not already going away
24038cdb878dSChristopher Yeoh  * and ptrace_may_access with the mode parameter passed to it
24048cdb878dSChristopher Yeoh  * succeeds.
24058cdb878dSChristopher Yeoh  */
24068cdb878dSChristopher Yeoh extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
24071da177e4SLinus Torvalds /* Remove the current task's stale references to the old mm_struct */
24081da177e4SLinus Torvalds extern void mm_release(struct task_struct *, struct mm_struct *);
24091da177e4SLinus Torvalds 
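/*
 * Editorial sketch: the canonical get_task_mm()/mmput() pairing.  The
 * reference from get_task_mm() keeps the address space alive while it is
 * inspected; mmput() may be the final put that tears the mappings down.
 * The function name is hypothetical.
 */
static inline unsigned long example_mm_total_vm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long total = 0;

	if (mm) {
		total = mm->total_vm;
		mmput(mm);
	}
	return total;
}
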
24106f2c55b8SAlexey Dobriyan extern int copy_thread(unsigned long, unsigned long, unsigned long,
2411afa86fc4SAl Viro 			struct task_struct *);
24121da177e4SLinus Torvalds extern void flush_thread(void);
24131da177e4SLinus Torvalds extern void exit_thread(void);
24141da177e4SLinus Torvalds 
24151da177e4SLinus Torvalds extern void exit_files(struct task_struct *);
2416a7e5328aSOleg Nesterov extern void __cleanup_sighand(struct sighand_struct *);
2417cbaffba1SOleg Nesterov 
24181da177e4SLinus Torvalds extern void exit_itimers(struct signal_struct *);
2419cbaffba1SOleg Nesterov extern void flush_itimer_signals(void);
24201da177e4SLinus Torvalds 
24219402c95fSJoe Perches extern void do_group_exit(int);
24221da177e4SLinus Torvalds 
2423c4ad8f98SLinus Torvalds extern int do_execve(struct filename *,
2424d7627467SDavid Howells 		     const char __user * const __user *,
2425da3d4c5fSAl Viro 		     const char __user * const __user *);
2426e80d6661SAl Viro extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
242736c8b586SIngo Molnar struct task_struct *fork_idle(int);
24282aa3a7f8SAl Viro extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
24291da177e4SLinus Torvalds 
243082b89778SAdrian Hunter extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
243182b89778SAdrian Hunter static inline void set_task_comm(struct task_struct *tsk, const char *from)
243282b89778SAdrian Hunter {
243382b89778SAdrian Hunter 	__set_task_comm(tsk, from, false);
243482b89778SAdrian Hunter }
243559714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
24361da177e4SLinus Torvalds 
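/*
 * Editorial sketch: reading a task's comm safely.  get_task_comm() takes
 * task_lock() internally, so the copy is consistent; the buffer must be
 * at least TASK_COMM_LEN bytes.
 */
static inline void example_print_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	printk(KERN_DEBUG "task: %s\n", comm);
}
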
24371da177e4SLinus Torvalds #ifdef CONFIG_SMP
2438317f3941SPeter Zijlstra void scheduler_ipi(void);
243985ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
24401da177e4SLinus Torvalds #else
2441184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
244285ba2d86SRoland McGrath static inline unsigned long wait_task_inactive(struct task_struct *p,
244385ba2d86SRoland McGrath 					       long match_state)
244485ba2d86SRoland McGrath {
244585ba2d86SRoland McGrath 	return 1;
244685ba2d86SRoland McGrath }
24471da177e4SLinus Torvalds #endif
24481da177e4SLinus Torvalds 
244905725f7eSJiri Pirko #define next_task(p) \
245005725f7eSJiri Pirko 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
24511da177e4SLinus Torvalds 
24521da177e4SLinus Torvalds #define for_each_process(p) \
24531da177e4SLinus Torvalds 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
24541da177e4SLinus Torvalds 
24555bb459bbSOleg Nesterov extern bool current_is_single_threaded(void);
2456d84f4f99SDavid Howells 
24571da177e4SLinus Torvalds /*
24581da177e4SLinus Torvalds  * Careful: do_each_thread/while_each_thread is a double loop so
24591da177e4SLinus Torvalds  *          'break' will not work as expected - use goto instead.
24601da177e4SLinus Torvalds  */
24611da177e4SLinus Torvalds #define do_each_thread(g, t) \
24621da177e4SLinus Torvalds 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
24631da177e4SLinus Torvalds 
24641da177e4SLinus Torvalds #define while_each_thread(g, t) \
24651da177e4SLinus Torvalds 	while ((t = next_thread(t)) != g)
24661da177e4SLinus Torvalds 
24670c740d0aSOleg Nesterov #define __for_each_thread(signal, t)	\
24680c740d0aSOleg Nesterov 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
24690c740d0aSOleg Nesterov 
24700c740d0aSOleg Nesterov #define for_each_thread(p, t)		\
24710c740d0aSOleg Nesterov 	__for_each_thread((p)->signal, t)
24720c740d0aSOleg Nesterov 
24730c740d0aSOleg Nesterov /* Careful: this is a double loop, 'break' won't work as expected. */
24740c740d0aSOleg Nesterov #define for_each_process_thread(p, t)	\
24750c740d0aSOleg Nesterov 	for_each_process(p) for_each_thread(p, t)
24760c740d0aSOleg Nesterov 
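/*
 * Editorial sketch: counting every thread in the system with the iterators
 * above.  The task lists are RCU-protected, so a reader takes
 * rcu_read_lock() (holding read_lock(&tasklist_lock) also works).
 */
static inline int example_count_threads(void)
{
	struct task_struct *p, *t;
	int n = 0;

	rcu_read_lock();
	for_each_process_thread(p, t)
		n++;
	rcu_read_unlock();
	return n;
}
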
24777e49827cSOleg Nesterov static inline int get_nr_threads(struct task_struct *tsk)
24787e49827cSOleg Nesterov {
2479b3ac022cSOleg Nesterov 	return tsk->signal->nr_threads;
24807e49827cSOleg Nesterov }
24817e49827cSOleg Nesterov 
2482087806b1SOleg Nesterov static inline bool thread_group_leader(struct task_struct *p)
2483087806b1SOleg Nesterov {
2484087806b1SOleg Nesterov 	return p->exit_signal >= 0;
2485087806b1SOleg Nesterov }
24861da177e4SLinus Torvalds 
24870804ef4bSEric W. Biederman /* Due to the insanities of de_thread() it is possible for a process
24880804ef4bSEric W. Biederman  * to have the pid of the thread group leader without actually being
24890804ef4bSEric W. Biederman  * the thread group leader.  For iteration through the pids in proc
24900804ef4bSEric W. Biederman  * all we care about is that we have a task with the appropriate
24910804ef4bSEric W. Biederman  * pid; we don't actually care if we have the right task.
24920804ef4bSEric W. Biederman  */
2493e1403b8eSOleg Nesterov static inline bool has_group_leader_pid(struct task_struct *p)
24940804ef4bSEric W. Biederman {
2495e1403b8eSOleg Nesterov 	return task_pid(p) == p->signal->leader_pid;
24960804ef4bSEric W. Biederman }
24970804ef4bSEric W. Biederman 
2498bac0abd6SPavel Emelyanov static inline
2499e1403b8eSOleg Nesterov bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2500bac0abd6SPavel Emelyanov {
2501e1403b8eSOleg Nesterov 	return p1->signal == p2->signal;
2502bac0abd6SPavel Emelyanov }
2503bac0abd6SPavel Emelyanov 
250436c8b586SIngo Molnar static inline struct task_struct *next_thread(const struct task_struct *p)
250547e65328SOleg Nesterov {
250605725f7eSJiri Pirko 	return list_entry_rcu(p->thread_group.next,
250736c8b586SIngo Molnar 			      struct task_struct, thread_group);
250847e65328SOleg Nesterov }
250947e65328SOleg Nesterov 
2510e868171aSAlexey Dobriyan static inline int thread_group_empty(struct task_struct *p)
25111da177e4SLinus Torvalds {
251247e65328SOleg Nesterov 	return list_empty(&p->thread_group);
25131da177e4SLinus Torvalds }
25141da177e4SLinus Torvalds 
25151da177e4SLinus Torvalds #define delay_group_leader(p) \
25161da177e4SLinus Torvalds 		(thread_group_leader(p) && !thread_group_empty(p))
25171da177e4SLinus Torvalds 
25181da177e4SLinus Torvalds /*
2519260ea101SEric W. Biederman  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
252022e2c507SJens Axboe  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2521ddbcc7e8SPaul Menage  * pins the final release of task.io_context.  Also protects ->cpuset and
2522d68b46feSOleg Nesterov  * ->cgroup.subsys[]. And ->vfork_done.
25231da177e4SLinus Torvalds  *
25241da177e4SLinus Torvalds  * Nests both inside and outside of read_lock(&tasklist_lock).
25251da177e4SLinus Torvalds  * It must not be nested with write_lock_irq(&tasklist_lock),
25261da177e4SLinus Torvalds  * neither inside nor outside.
25271da177e4SLinus Torvalds  */
25281da177e4SLinus Torvalds static inline void task_lock(struct task_struct *p)
25291da177e4SLinus Torvalds {
25301da177e4SLinus Torvalds 	spin_lock(&p->alloc_lock);
25311da177e4SLinus Torvalds }
25321da177e4SLinus Torvalds 
25331da177e4SLinus Torvalds static inline void task_unlock(struct task_struct *p)
25341da177e4SLinus Torvalds {
25351da177e4SLinus Torvalds 	spin_unlock(&p->alloc_lock);
25361da177e4SLinus Torvalds }
25371da177e4SLinus Torvalds 
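/*
 * Editorial sketch: task_lock() stabilizing ->mm for a brief inspection,
 * per the locking comment above.  The function name is hypothetical.
 */
static inline bool example_task_has_mm(struct task_struct *p)
{
	bool ret;

	task_lock(p);
	ret = p->mm != NULL;
	task_unlock(p);
	return ret;
}
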
2538b8ed374eSNamhyung Kim extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2539f63ee72eSOleg Nesterov 							unsigned long *flags);
2540f63ee72eSOleg Nesterov 
25419388dc30SAnton Vorontsov static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
25429388dc30SAnton Vorontsov 						       unsigned long *flags)
25439388dc30SAnton Vorontsov {
25449388dc30SAnton Vorontsov 	struct sighand_struct *ret;
25459388dc30SAnton Vorontsov 
25469388dc30SAnton Vorontsov 	ret = __lock_task_sighand(tsk, flags);
25479388dc30SAnton Vorontsov 	(void)__cond_lock(&tsk->sighand->siglock, ret);
25489388dc30SAnton Vorontsov 	return ret;
25499388dc30SAnton Vorontsov }
2550b8ed374eSNamhyung Kim 
2551f63ee72eSOleg Nesterov static inline void unlock_task_sighand(struct task_struct *tsk,
2552f63ee72eSOleg Nesterov 						unsigned long *flags)
2553f63ee72eSOleg Nesterov {
2554f63ee72eSOleg Nesterov 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2555f63ee72eSOleg Nesterov }
2556f63ee72eSOleg Nesterov 
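/*
 * Editorial sketch: lock_task_sighand() can fail when the task is exiting
 * and its sighand is already gone, so the return value must be checked
 * before touching signal state.  The function name is hypothetical.
 */
static inline int example_has_pending_signals(struct task_struct *tsk)
{
	unsigned long flags;
	int pending = 0;

	if (lock_task_sighand(tsk, &flags)) {
		pending = !sigisemptyset(&tsk->pending.signal);
		unlock_task_sighand(tsk, &flags);
	}
	return pending;
}
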
25574714d1d3SBen Blum #ifdef CONFIG_CGROUPS
2558257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk)
25594714d1d3SBen Blum {
2560257058aeSTejun Heo 	down_read(&tsk->signal->group_rwsem);
25614714d1d3SBen Blum }
2562257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk)
25634714d1d3SBen Blum {
2564257058aeSTejun Heo 	up_read(&tsk->signal->group_rwsem);
25654714d1d3SBen Blum }
256677e4ef99STejun Heo 
256777e4ef99STejun Heo /**
256877e4ef99STejun Heo  * threadgroup_lock - lock threadgroup
256977e4ef99STejun Heo  * @tsk: member task of the threadgroup to lock
257077e4ef99STejun Heo  *
257177e4ef99STejun Heo  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
257277e4ef99STejun Heo  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2573e56fb287SOleg Nesterov  * change ->group_leader/pid.  This is useful for cases where the threadgroup
2574e56fb287SOleg Nesterov  * needs to stay stable across blockable operations.
257577e4ef99STejun Heo  *
257677e4ef99STejun Heo  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
257777e4ef99STejun Heo  * synchronization.  While held, no new task will be added to threadgroup
257877e4ef99STejun Heo  * and no existing live task will have its PF_EXITING set.
257977e4ef99STejun Heo  *
2580e56fb287SOleg Nesterov  * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2581e56fb287SOleg Nesterov  * sub-thread becomes a new leader.
258277e4ef99STejun Heo  */
2583257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk)
25844714d1d3SBen Blum {
2585257058aeSTejun Heo 	down_write(&tsk->signal->group_rwsem);
25864714d1d3SBen Blum }
258777e4ef99STejun Heo 
258877e4ef99STejun Heo /**
258977e4ef99STejun Heo  * threadgroup_unlock - unlock threadgroup
259077e4ef99STejun Heo  * @tsk: member task of the threadgroup to unlock
259177e4ef99STejun Heo  *
259277e4ef99STejun Heo  * Reverse threadgroup_lock().
259377e4ef99STejun Heo  */
2594257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk)
25954714d1d3SBen Blum {
2596257058aeSTejun Heo 	up_write(&tsk->signal->group_rwsem);
25974714d1d3SBen Blum }
25984714d1d3SBen Blum #else
2599257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2600257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk) {}
2601257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk) {}
2602257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk) {}
26034714d1d3SBen Blum #endif
26044714d1d3SBen Blum 
2605f037360fSAl Viro #ifndef __HAVE_THREAD_FUNCTIONS
2606f037360fSAl Viro 
2607f7e4217bSRoman Zippel #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2608f7e4217bSRoman Zippel #define task_stack_page(task)	((task)->stack)
2609a1261f54SAl Viro 
261010ebffdeSAl Viro static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
261110ebffdeSAl Viro {
261210ebffdeSAl Viro 	*task_thread_info(p) = *task_thread_info(org);
261310ebffdeSAl Viro 	task_thread_info(p)->task = p;
261410ebffdeSAl Viro }
261510ebffdeSAl Viro 
261610ebffdeSAl Viro static inline unsigned long *end_of_stack(struct task_struct *p)
261710ebffdeSAl Viro {
2618f7e4217bSRoman Zippel 	return (unsigned long *)(task_thread_info(p) + 1);
261910ebffdeSAl Viro }
262010ebffdeSAl Viro 
2621f037360fSAl Viro #endif
2622f037360fSAl Viro 
26238b05c7e6SFUJITA Tomonori static inline int object_is_on_stack(void *obj)
26248b05c7e6SFUJITA Tomonori {
26258b05c7e6SFUJITA Tomonori 	void *stack = task_stack_page(current);
26268b05c7e6SFUJITA Tomonori 
26278b05c7e6SFUJITA Tomonori 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
26288b05c7e6SFUJITA Tomonori }
26298b05c7e6SFUJITA Tomonori 
26308c9843e5SBenjamin Herrenschmidt extern void thread_info_cache_init(void);
26318c9843e5SBenjamin Herrenschmidt 
26327c9f8861SEric Sandeen #ifdef CONFIG_DEBUG_STACK_USAGE
26337c9f8861SEric Sandeen static inline unsigned long stack_not_used(struct task_struct *p)
26347c9f8861SEric Sandeen {
26357c9f8861SEric Sandeen 	unsigned long *n = end_of_stack(p);
26367c9f8861SEric Sandeen 
26377c9f8861SEric Sandeen 	do { 	/* Skip over canary */
26387c9f8861SEric Sandeen 		n++;
26397c9f8861SEric Sandeen 	} while (!*n);
26407c9f8861SEric Sandeen 
26417c9f8861SEric Sandeen 	return (unsigned long)n - (unsigned long)end_of_stack(p);
26427c9f8861SEric Sandeen }
26437c9f8861SEric Sandeen #endif
26447c9f8861SEric Sandeen 
26451da177e4SLinus Torvalds /* Set thread flags in another task's structures.
26461da177e4SLinus Torvalds  * See asm/thread_info.h for the TIF_xxxx flags available.
26471da177e4SLinus Torvalds  */
26481da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
26491da177e4SLinus Torvalds {
2650a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
26511da177e4SLinus Torvalds }
26521da177e4SLinus Torvalds 
26531da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
26541da177e4SLinus Torvalds {
2655a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
26561da177e4SLinus Torvalds }
26571da177e4SLinus Torvalds 
26581da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
26591da177e4SLinus Torvalds {
2660a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
26611da177e4SLinus Torvalds }
26621da177e4SLinus Torvalds 
26631da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
26641da177e4SLinus Torvalds {
2665a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
26661da177e4SLinus Torvalds }
26671da177e4SLinus Torvalds 
26681da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
26691da177e4SLinus Torvalds {
2670a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
26711da177e4SLinus Torvalds }
26721da177e4SLinus Torvalds 
26731da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
26741da177e4SLinus Torvalds {
26751da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
26761da177e4SLinus Torvalds }
26771da177e4SLinus Torvalds 
26781da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
26791da177e4SLinus Torvalds {
26801da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
26811da177e4SLinus Torvalds }
26821da177e4SLinus Torvalds 
26838ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
26848ae121acSGregory Haskins {
26858ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
26868ae121acSGregory Haskins }
26878ae121acSGregory Haskins 
2688690cc3ffSEric W. Biederman static inline int restart_syscall(void)
2689690cc3ffSEric W. Biederman {
2690690cc3ffSEric W. Biederman 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2691690cc3ffSEric W. Biederman 	return -ERESTARTNOINTR;
2692690cc3ffSEric W. Biederman }
2693690cc3ffSEric W. Biederman 
26941da177e4SLinus Torvalds static inline int signal_pending(struct task_struct *p)
26951da177e4SLinus Torvalds {
26961da177e4SLinus Torvalds 	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
26971da177e4SLinus Torvalds }
26981da177e4SLinus Torvalds 
2699d9588725SRoland McGrath static inline int __fatal_signal_pending(struct task_struct *p)
2700d9588725SRoland McGrath {
2701d9588725SRoland McGrath 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2702d9588725SRoland McGrath }
2703f776d12dSMatthew Wilcox 
2704f776d12dSMatthew Wilcox static inline int fatal_signal_pending(struct task_struct *p)
2705f776d12dSMatthew Wilcox {
2706f776d12dSMatthew Wilcox 	return signal_pending(p) && __fatal_signal_pending(p);
2707f776d12dSMatthew Wilcox }
2708f776d12dSMatthew Wilcox 
270916882c1eSOleg Nesterov static inline int signal_pending_state(long state, struct task_struct *p)
271016882c1eSOleg Nesterov {
271116882c1eSOleg Nesterov 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
271216882c1eSOleg Nesterov 		return 0;
271316882c1eSOleg Nesterov 	if (!signal_pending(p))
271416882c1eSOleg Nesterov 		return 0;
271516882c1eSOleg Nesterov 
271616882c1eSOleg Nesterov 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
271716882c1eSOleg Nesterov }
271816882c1eSOleg Nesterov 
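/*
 * Editorial sketch: signal_pending_state() is what the scheduler core uses
 * to decide whether a sleep in a given state must abort.  Below, a sketch
 * of the open-coded interruptible wait loop that makes the same check via
 * signal_pending(); 'condition' is a hypothetical wakeup predicate.
 */
static inline int example_interruptible_wait(int (*condition)(void))
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition())
			break;
		if (signal_pending(current)) {
			__set_current_state(TASK_RUNNING);
			return -ERESTARTSYS;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
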
27191da177e4SLinus Torvalds /*
27201da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
27211da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
27221da177e4SLinus Torvalds  * value indicates whether a reschedule was done in fact.
27231da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling,
27241da177e4SLinus Torvalds  * cond_resched_softirq() will enable bhs before scheduling.
27251da177e4SLinus Torvalds  */
2726c3921ab7SLinus Torvalds extern int _cond_resched(void);
27276f80bd98SFrederic Weisbecker 
2728613afbf8SFrederic Weisbecker #define cond_resched() ({			\
2729613afbf8SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, 0);	\
2730613afbf8SFrederic Weisbecker 	_cond_resched();			\
2731613afbf8SFrederic Weisbecker })
27326f80bd98SFrederic Weisbecker 
2733613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
2734613afbf8SFrederic Weisbecker 
2735bdd4e85dSFrederic Weisbecker #ifdef CONFIG_PREEMPT_COUNT
2736716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
273702b67cc3SHerbert Xu #else
2738716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	0
273902b67cc3SHerbert Xu #endif
2740716a4234SFrederic Weisbecker 
2741613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
2742716a4234SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2743613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
2744613afbf8SFrederic Weisbecker })
2745613afbf8SFrederic Weisbecker 
2746613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
2747613afbf8SFrederic Weisbecker 
2748613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
274975e1056fSVenkatesh Pallipadi 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2750613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
2751613afbf8SFrederic Weisbecker })
27521da177e4SLinus Torvalds 
2753f6f3c437SSimon Horman static inline void cond_resched_rcu(void)
2754f6f3c437SSimon Horman {
2755f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2756f6f3c437SSimon Horman 	rcu_read_unlock();
2757f6f3c437SSimon Horman 	cond_resched();
2758f6f3c437SSimon Horman 	rcu_read_lock();
2759f6f3c437SSimon Horman #endif
2760f6f3c437SSimon Horman }
2761f6f3c437SSimon Horman 
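/*
 * Editorial sketch: the classic cond_resched() pattern inside a long
 * kernel-side loop, so even a non-preemptible kernel schedules with
 * reasonable latency.  'nr_items' and the per-item work are hypothetical.
 */
static inline void example_long_loop(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		/* ... per-item work here ... */
		cond_resched();
	}
}
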
27621da177e4SLinus Torvalds /*
27631da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
276495c354feSNick Piggin  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
276595c354feSNick Piggin  * but reflects a general need for low latency.)
27661da177e4SLinus Torvalds  */
276795c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
27681da177e4SLinus Torvalds {
276995c354feSNick Piggin #ifdef CONFIG_PREEMPT
277095c354feSNick Piggin 	return spin_is_contended(lock);
277195c354feSNick Piggin #else
27721da177e4SLinus Torvalds 	return 0;
277395c354feSNick Piggin #endif
27741da177e4SLinus Torvalds }
27751da177e4SLinus Torvalds 
27767bb44adeSRoland McGrath /*
2777ee761f62SThomas Gleixner  * Idle thread specific functions to determine the need_resched
277869dd0f84SPeter Zijlstra  * polling state.
2779ee761f62SThomas Gleixner  */
278069dd0f84SPeter Zijlstra #ifdef TIF_POLLING_NRFLAG
2781ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p)
2782ee761f62SThomas Gleixner {
2783ee761f62SThomas Gleixner 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2784ee761f62SThomas Gleixner }
2785ea811747SPeter Zijlstra 
2786ea811747SPeter Zijlstra static inline void __current_set_polling(void)
27873a98f871SThomas Gleixner {
27883a98f871SThomas Gleixner 	set_thread_flag(TIF_POLLING_NRFLAG);
27893a98f871SThomas Gleixner }
27903a98f871SThomas Gleixner 
2791ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2792ea811747SPeter Zijlstra {
2793ea811747SPeter Zijlstra 	__current_set_polling();
2794ea811747SPeter Zijlstra 
2795ea811747SPeter Zijlstra 	/*
2796ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
27978875125eSKirill Tkhai 	 * paired by resched_curr()
2798ea811747SPeter Zijlstra 	 */
27994e857c58SPeter Zijlstra 	smp_mb__after_atomic();
2800ea811747SPeter Zijlstra 
2801ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2802ea811747SPeter Zijlstra }
2803ea811747SPeter Zijlstra 
2804ea811747SPeter Zijlstra static inline void __current_clr_polling(void)
28053a98f871SThomas Gleixner {
28063a98f871SThomas Gleixner 	clear_thread_flag(TIF_POLLING_NRFLAG);
28073a98f871SThomas Gleixner }
2808ea811747SPeter Zijlstra 
2809ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2810ea811747SPeter Zijlstra {
2811ea811747SPeter Zijlstra 	__current_clr_polling();
2812ea811747SPeter Zijlstra 
2813ea811747SPeter Zijlstra 	/*
2814ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
28158875125eSKirill Tkhai 	 * paired by resched_curr()
2816ea811747SPeter Zijlstra 	 */
28174e857c58SPeter Zijlstra 	smp_mb__after_atomic();
2818ea811747SPeter Zijlstra 
2819ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2820ea811747SPeter Zijlstra }
2821ea811747SPeter Zijlstra 
2822ee761f62SThomas Gleixner #else
2823ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2824ea811747SPeter Zijlstra static inline void __current_set_polling(void) { }
2825ea811747SPeter Zijlstra static inline void __current_clr_polling(void) { }
2826ea811747SPeter Zijlstra 
2827ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2828ea811747SPeter Zijlstra {
2829ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2830ea811747SPeter Zijlstra }
2831ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2832ea811747SPeter Zijlstra {
2833ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2834ea811747SPeter Zijlstra }
2835ee761f62SThomas Gleixner #endif
2836ee761f62SThomas Gleixner 
28378cb75e0cSPeter Zijlstra static inline void current_clr_polling(void)
28388cb75e0cSPeter Zijlstra {
28398cb75e0cSPeter Zijlstra 	__current_clr_polling();
28408cb75e0cSPeter Zijlstra 
28418cb75e0cSPeter Zijlstra 	/*
28428cb75e0cSPeter Zijlstra 	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
28438cb75e0cSPeter Zijlstra 	 * Once the bit is cleared, we'll get IPIs with every new
28448cb75e0cSPeter Zijlstra 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
28458cb75e0cSPeter Zijlstra 	 * fold.
28468cb75e0cSPeter Zijlstra 	 */
28478875125eSKirill Tkhai 	smp_mb(); /* paired with resched_curr() */
28488cb75e0cSPeter Zijlstra 
28498cb75e0cSPeter Zijlstra 	preempt_fold_need_resched();
28508cb75e0cSPeter Zijlstra }
28518cb75e0cSPeter Zijlstra 
285275f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
285375f93fedSPeter Zijlstra {
285475f93fedSPeter Zijlstra 	return unlikely(tif_need_resched());
285575f93fedSPeter Zijlstra }
285675f93fedSPeter Zijlstra 
2857ee761f62SThomas Gleixner /*
2858f06febc9SFrank Mayhar  * Thread group CPU time accounting.
2859f06febc9SFrank Mayhar  */
28604cd4c1b4SPeter Zijlstra void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
28614da94d49SPeter Zijlstra void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2862f06febc9SFrank Mayhar 
2863f06febc9SFrank Mayhar static inline void thread_group_cputime_init(struct signal_struct *sig)
2864f06febc9SFrank Mayhar {
2865ee30a7b2SThomas Gleixner 	raw_spin_lock_init(&sig->cputimer.lock);
2866f06febc9SFrank Mayhar }
2867f06febc9SFrank Mayhar 
2868f06febc9SFrank Mayhar /*
28697bb44adeSRoland McGrath  * Reevaluate whether the task has signals pending delivery.
28707bb44adeSRoland McGrath  * Wake the task if so.
28717bb44adeSRoland McGrath  * This is required every time the blocked sigset_t changes.
28727bb44adeSRoland McGrath  * callers must hold sighand->siglock.
28737bb44adeSRoland McGrath  * Callers must hold sighand->siglock.
28747bb44adeSRoland McGrath extern void recalc_sigpending_and_wake(struct task_struct *t);
28751da177e4SLinus Torvalds extern void recalc_sigpending(void);
28761da177e4SLinus Torvalds 
2877910ffdb1SOleg Nesterov extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2878910ffdb1SOleg Nesterov 
2879910ffdb1SOleg Nesterov static inline void signal_wake_up(struct task_struct *t, bool resume)
2880910ffdb1SOleg Nesterov {
2881910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2882910ffdb1SOleg Nesterov }
2883910ffdb1SOleg Nesterov static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2884910ffdb1SOleg Nesterov {
2885910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2886910ffdb1SOleg Nesterov }
28871da177e4SLinus Torvalds 
28881da177e4SLinus Torvalds /*
28891da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
28901da177e4SLinus Torvalds  */
28911da177e4SLinus Torvalds #ifdef CONFIG_SMP
28921da177e4SLinus Torvalds 
28931da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
28941da177e4SLinus Torvalds {
2895a1261f54SAl Viro 	return task_thread_info(p)->cpu;
28961da177e4SLinus Torvalds }
28971da177e4SLinus Torvalds 
2898b32e86b4SIngo Molnar static inline int task_node(const struct task_struct *p)
2899b32e86b4SIngo Molnar {
2900b32e86b4SIngo Molnar 	return cpu_to_node(task_cpu(p));
2901b32e86b4SIngo Molnar }
2902b32e86b4SIngo Molnar 
2903c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
29041da177e4SLinus Torvalds 
29051da177e4SLinus Torvalds #else
29061da177e4SLinus Torvalds 
29071da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
29081da177e4SLinus Torvalds {
29091da177e4SLinus Torvalds 	return 0;
29101da177e4SLinus Torvalds }
29111da177e4SLinus Torvalds 
29121da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
29131da177e4SLinus Torvalds {
29141da177e4SLinus Torvalds }
29151da177e4SLinus Torvalds 
29161da177e4SLinus Torvalds #endif /* CONFIG_SMP */
29171da177e4SLinus Torvalds 
291896f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
291996f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
29205c45bf27SSiddha, Suresh B 
29217c941438SDhaval Giani #ifdef CONFIG_CGROUP_SCHED
292207e06b01SYong Zhang extern struct task_group root_task_group;
29238323f26cSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */
29249b5b7751SSrivatsa Vaddagiri 
292554e99124SDhaval Giani extern int task_can_switch_user(struct user_struct *up,
292654e99124SDhaval Giani 					struct task_struct *tsk);
292754e99124SDhaval Giani 
29284b98d11bSAlexey Dobriyan #ifdef CONFIG_TASK_XACCT
29294b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
29304b98d11bSAlexey Dobriyan {
2931940389b8SAndrea Righi 	tsk->ioac.rchar += amt;
29324b98d11bSAlexey Dobriyan }
29334b98d11bSAlexey Dobriyan 
29344b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
29354b98d11bSAlexey Dobriyan {
2936940389b8SAndrea Righi 	tsk->ioac.wchar += amt;
29374b98d11bSAlexey Dobriyan }
29384b98d11bSAlexey Dobriyan 
29394b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
29404b98d11bSAlexey Dobriyan {
2941940389b8SAndrea Righi 	tsk->ioac.syscr++;
29424b98d11bSAlexey Dobriyan }
29434b98d11bSAlexey Dobriyan 
29444b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
29454b98d11bSAlexey Dobriyan {
2946940389b8SAndrea Righi 	tsk->ioac.syscw++;
29474b98d11bSAlexey Dobriyan }
29484b98d11bSAlexey Dobriyan #else
29494b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
29504b98d11bSAlexey Dobriyan {
29514b98d11bSAlexey Dobriyan }
29524b98d11bSAlexey Dobriyan 
29534b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
29544b98d11bSAlexey Dobriyan {
29554b98d11bSAlexey Dobriyan }
29564b98d11bSAlexey Dobriyan 
29574b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
29584b98d11bSAlexey Dobriyan {
29594b98d11bSAlexey Dobriyan }
29604b98d11bSAlexey Dobriyan 
29614b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
29624b98d11bSAlexey Dobriyan {
29634b98d11bSAlexey Dobriyan }
29644b98d11bSAlexey Dobriyan #endif
29654b98d11bSAlexey Dobriyan 
296682455257SDave Hansen #ifndef TASK_SIZE_OF
296782455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
296882455257SDave Hansen #endif
296982455257SDave Hansen 
2970f98bafa0SOleg Nesterov #ifdef CONFIG_MEMCG
2971cf475ad2SBalbir Singh extern void mm_update_next_owner(struct mm_struct *mm);
2972cf475ad2SBalbir Singh #else
2973cf475ad2SBalbir Singh static inline void mm_update_next_owner(struct mm_struct *mm)
2974cf475ad2SBalbir Singh {
2975cf475ad2SBalbir Singh }
2976f98bafa0SOleg Nesterov #endif /* CONFIG_MEMCG */
2977cf475ad2SBalbir Singh 
29783e10e716SJiri Slaby static inline unsigned long task_rlimit(const struct task_struct *tsk,
29793e10e716SJiri Slaby 		unsigned int limit)
29803e10e716SJiri Slaby {
29813e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
29823e10e716SJiri Slaby }
29833e10e716SJiri Slaby 
29843e10e716SJiri Slaby static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
29853e10e716SJiri Slaby 		unsigned int limit)
29863e10e716SJiri Slaby {
29873e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
29883e10e716SJiri Slaby }
29893e10e716SJiri Slaby 
29903e10e716SJiri Slaby static inline unsigned long rlimit(unsigned int limit)
29913e10e716SJiri Slaby {
29923e10e716SJiri Slaby 	return task_rlimit(current, limit);
29933e10e716SJiri Slaby }
29943e10e716SJiri Slaby 
29953e10e716SJiri Slaby static inline unsigned long rlimit_max(unsigned int limit)
29963e10e716SJiri Slaby {
29973e10e716SJiri Slaby 	return task_rlimit_max(current, limit);
29983e10e716SJiri Slaby }
29993e10e716SJiri Slaby 
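/*
 * Editorial sketch: a typical rlimit() check, here against the current
 * task's RLIMIT_NOFILE soft limit.  The helpers above read the limit with
 * ACCESS_ONCE() since another thread may change it concurrently.  The
 * function name is hypothetical.
 */
static inline int example_check_nofile(unsigned int want_fds)
{
	if (want_fds > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	return 0;
}
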
30001da177e4SLinus Torvalds #endif
3001