xref: /linux/include/linux/sched.h (revision fed14d45f945042a15b09de48d7d3d58d9455fc4)
11da177e4SLinus Torvalds #ifndef _LINUX_SCHED_H
21da177e4SLinus Torvalds #define _LINUX_SCHED_H
31da177e4SLinus Torvalds 
4607ca46eSDavid Howells #include <uapi/linux/sched.h>
5b7b3c76aSDavid Woodhouse 
65c228079SDongsheng Yang #include <linux/sched/prio.h>
75c228079SDongsheng Yang 
8b7b3c76aSDavid Woodhouse 
9b7b3c76aSDavid Woodhouse struct sched_param {
10b7b3c76aSDavid Woodhouse 	int sched_priority;
11b7b3c76aSDavid Woodhouse };
12b7b3c76aSDavid Woodhouse 
131da177e4SLinus Torvalds #include <asm/param.h>	/* for HZ */
141da177e4SLinus Torvalds 
151da177e4SLinus Torvalds #include <linux/capability.h>
161da177e4SLinus Torvalds #include <linux/threads.h>
171da177e4SLinus Torvalds #include <linux/kernel.h>
181da177e4SLinus Torvalds #include <linux/types.h>
191da177e4SLinus Torvalds #include <linux/timex.h>
201da177e4SLinus Torvalds #include <linux/jiffies.h>
21fb00aca4SPeter Zijlstra #include <linux/plist.h>
221da177e4SLinus Torvalds #include <linux/rbtree.h>
231da177e4SLinus Torvalds #include <linux/thread_info.h>
241da177e4SLinus Torvalds #include <linux/cpumask.h>
251da177e4SLinus Torvalds #include <linux/errno.h>
261da177e4SLinus Torvalds #include <linux/nodemask.h>
27c92ff1bdSMartin Schwidefsky #include <linux/mm_types.h>
2800d1a39eSThomas Gleixner #include <linux/preempt_mask.h>
291da177e4SLinus Torvalds 
301da177e4SLinus Torvalds #include <asm/page.h>
311da177e4SLinus Torvalds #include <asm/ptrace.h>
321da177e4SLinus Torvalds #include <asm/cputime.h>
331da177e4SLinus Torvalds 
341da177e4SLinus Torvalds #include <linux/smp.h>
351da177e4SLinus Torvalds #include <linux/sem.h>
361da177e4SLinus Torvalds #include <linux/signal.h>
371da177e4SLinus Torvalds #include <linux/compiler.h>
381da177e4SLinus Torvalds #include <linux/completion.h>
391da177e4SLinus Torvalds #include <linux/pid.h>
401da177e4SLinus Torvalds #include <linux/percpu.h>
411da177e4SLinus Torvalds #include <linux/topology.h>
423e26c149SPeter Zijlstra #include <linux/proportions.h>
431da177e4SLinus Torvalds #include <linux/seccomp.h>
44e56d0903SIngo Molnar #include <linux/rcupdate.h>
4505725f7eSJiri Pirko #include <linux/rculist.h>
4623f78d4aSIngo Molnar #include <linux/rtmutex.h>
471da177e4SLinus Torvalds 
48a3b6714eSDavid Woodhouse #include <linux/time.h>
49a3b6714eSDavid Woodhouse #include <linux/param.h>
50a3b6714eSDavid Woodhouse #include <linux/resource.h>
51a3b6714eSDavid Woodhouse #include <linux/timer.h>
52a3b6714eSDavid Woodhouse #include <linux/hrtimer.h>
537c3ab738SAndrew Morton #include <linux/task_io_accounting.h>
549745512cSArjan van de Ven #include <linux/latencytop.h>
559e2b2dc4SDavid Howells #include <linux/cred.h>
56fa14ff4aSPeter Zijlstra #include <linux/llist.h>
577b44ab97SEric W. Biederman #include <linux/uidgid.h>
5821caf2fcSMing Lei #include <linux/gfp.h>
59a3b6714eSDavid Woodhouse 
60a3b6714eSDavid Woodhouse #include <asm/processor.h>
6136d57ac4SH. J. Lu 
62d50dde5aSDario Faggioli #define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
63d50dde5aSDario Faggioli 
64d50dde5aSDario Faggioli /*
65d50dde5aSDario Faggioli  * Extended scheduling parameters data structure.
66d50dde5aSDario Faggioli  *
67d50dde5aSDario Faggioli  * This is needed because the original struct sched_param cannot be
68d50dde5aSDario Faggioli  * altered without introducing ABI issues with legacy applications
69d50dde5aSDario Faggioli  * (e.g., in sched_getparam()).
70d50dde5aSDario Faggioli  *
71d50dde5aSDario Faggioli  * However, the possibility of specifying more than just a priority for
72d50dde5aSDario Faggioli  * the tasks may be useful for a wide variety of application fields, e.g.,
73d50dde5aSDario Faggioli  * multimedia, streaming, automation and control, and many others.
74d50dde5aSDario Faggioli  *
75d50dde5aSDario Faggioli  * This variant (sched_attr) is meant to describe a so-called
76d50dde5aSDario Faggioli  * sporadic time-constrained task. In such a model a task is specified by:
77d50dde5aSDario Faggioli  *  - the activation period or minimum instance inter-arrival time;
78d50dde5aSDario Faggioli  *  - the maximum (or average, depending on the actual scheduling
79d50dde5aSDario Faggioli  *    discipline) computation time of all instances, a.k.a. runtime;
80d50dde5aSDario Faggioli  *  - the deadline (relative to the actual activation time) of each
81d50dde5aSDario Faggioli  *    instance.
82d50dde5aSDario Faggioli  * Very briefly, a periodic (sporadic) task asks for the execution of
83d50dde5aSDario Faggioli  * some specific computation --which is typically called an instance--
84d50dde5aSDario Faggioli  * (at most) every period. Moreover, each instance typically lasts no more
85d50dde5aSDario Faggioli  * than the runtime and must be completed by time instant t equal to
86d50dde5aSDario Faggioli  * the instance activation time + the deadline.
87d50dde5aSDario Faggioli  *
88d50dde5aSDario Faggioli  * This is reflected by the actual fields of the sched_attr structure:
89d50dde5aSDario Faggioli  *
90d50dde5aSDario Faggioli  *  @size		size of the structure, for fwd/bwd compat.
91d50dde5aSDario Faggioli  *
92d50dde5aSDario Faggioli  *  @sched_policy	task's scheduling policy
93d50dde5aSDario Faggioli  *  @sched_flags	for customizing the scheduler behaviour
94d50dde5aSDario Faggioli  *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
95d50dde5aSDario Faggioli  *  @sched_priority	task's static priority (SCHED_FIFO/RR)
96d50dde5aSDario Faggioli  *  @sched_deadline	representative of the task's deadline
97d50dde5aSDario Faggioli  *  @sched_runtime	representative of the task's runtime
98d50dde5aSDario Faggioli  *  @sched_period	representative of the task's period
99d50dde5aSDario Faggioli  *
100d50dde5aSDario Faggioli  * Given this task model, there are a multiplicity of scheduling algorithms
101d50dde5aSDario Faggioli  * and policies, that can be used to ensure all the tasks will make their
102d50dde5aSDario Faggioli  * timing constraints.
103aab03e05SDario Faggioli  *
104aab03e05SDario Faggioli  * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
105aab03e05SDario Faggioli  * only user of this new interface. More information about the algorithm
106aab03e05SDario Faggioli  * is available in the scheduling class file or in Documentation/.
107d50dde5aSDario Faggioli  */
108d50dde5aSDario Faggioli struct sched_attr {
109d50dde5aSDario Faggioli 	u32 size;
110d50dde5aSDario Faggioli 
111d50dde5aSDario Faggioli 	u32 sched_policy;
112d50dde5aSDario Faggioli 	u64 sched_flags;
113d50dde5aSDario Faggioli 
114d50dde5aSDario Faggioli 	/* SCHED_NORMAL, SCHED_BATCH */
115d50dde5aSDario Faggioli 	s32 sched_nice;
116d50dde5aSDario Faggioli 
117d50dde5aSDario Faggioli 	/* SCHED_FIFO, SCHED_RR */
118d50dde5aSDario Faggioli 	u32 sched_priority;
119d50dde5aSDario Faggioli 
120d50dde5aSDario Faggioli 	/* SCHED_DEADLINE */
121d50dde5aSDario Faggioli 	u64 sched_runtime;
122d50dde5aSDario Faggioli 	u64 sched_deadline;
123d50dde5aSDario Faggioli 	u64 sched_period;
124d50dde5aSDario Faggioli };
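/*
 * Illustrative usage sketch (not part of this interface definition):
 * how a userspace task might request SCHED_DEADLINE via the
 * sched_setattr() system call.  The raw syscall(2) invocation is shown
 * because libc typically provides no wrapper; the 10/30/100 ms
 * runtime/deadline/period values (expressed in nanoseconds) are
 * arbitrary example figures.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	=  30 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */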
125d50dde5aSDario Faggioli 
1261da177e4SLinus Torvalds struct exec_domain;
127c87e2837SIngo Molnar struct futex_pi_state;
128286100a6SAlexey Dobriyan struct robust_list_head;
129bddd87c7SAkinobu Mita struct bio_list;
1305ad4e53bSAl Viro struct fs_struct;
131cdd6c482SIngo Molnar struct perf_event_context;
13273c10101SJens Axboe struct blk_plug;
1331da177e4SLinus Torvalds 
1341da177e4SLinus Torvalds /*
1351da177e4SLinus Torvalds  * List of flags we want to share for kernel threads,
1361da177e4SLinus Torvalds  * if only because they are not used by them anyway.
1371da177e4SLinus Torvalds  */
1381da177e4SLinus Torvalds #define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
1391da177e4SLinus Torvalds 
1401da177e4SLinus Torvalds /*
1411da177e4SLinus Torvalds  * These are the constants used to fake the fixed-point load-average
1421da177e4SLinus Torvalds  * counting. Some notes:
1431da177e4SLinus Torvalds  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
1441da177e4SLinus Torvalds  *    a load-average precision of 10 bits integer + 11 bits fractional
1451da177e4SLinus Torvalds  *  - if you want to count load-averages more often, you need more
1461da177e4SLinus Torvalds  *    precision, or rounding will get you. With 2-second counting freq,
1471da177e4SLinus Torvalds  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
1481da177e4SLinus Torvalds  *    11 bit fractions.
1491da177e4SLinus Torvalds  */
1501da177e4SLinus Torvalds extern unsigned long avenrun[];		/* Load averages */
1512d02494fSThomas Gleixner extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
1521da177e4SLinus Torvalds 
1531da177e4SLinus Torvalds #define FSHIFT		11		/* nr of bits of precision */
1541da177e4SLinus Torvalds #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
1550c2043abSLinus Torvalds #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
1561da177e4SLinus Torvalds #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
1571da177e4SLinus Torvalds #define EXP_5		2014		/* 1/exp(5sec/5min) */
1581da177e4SLinus Torvalds #define EXP_15		2037		/* 1/exp(5sec/15min) */
1591da177e4SLinus Torvalds 
1601da177e4SLinus Torvalds #define CALC_LOAD(load,exp,n) \
1611da177e4SLinus Torvalds 	load *= exp; \
1621da177e4SLinus Torvalds 	load += n*(FIXED_1-exp); \
1631da177e4SLinus Torvalds 	load >>= FSHIFT;
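/*
 * Worked example (illustrative): EXP_1 is FIXED_1/exp(5sec/1min), i.e.
 * 2048 * 0.9200 ~= 1884.  The load-average code passes the runnable
 * task count pre-scaled by FIXED_1, so with a previous load of 1.0
 * (2048) and 3 runnable tasks (n = 3 * 2048 = 6144), one 5-second step
 * of the 1-minute average is:
 *
 *	load = (2048 * 1884 + 6144 * (2048 - 1884)) >> 11
 *	     = (3858432 + 1007616) >> 11
 *	     = 2376, i.e. ~1.16, decaying towards 3.0
 */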
1641da177e4SLinus Torvalds 
1651da177e4SLinus Torvalds extern unsigned long total_forks;
1661da177e4SLinus Torvalds extern int nr_threads;
1671da177e4SLinus Torvalds DECLARE_PER_CPU(unsigned long, process_counts);
1681da177e4SLinus Torvalds extern int nr_processes(void);
1691da177e4SLinus Torvalds extern unsigned long nr_running(void);
1701da177e4SLinus Torvalds extern unsigned long nr_iowait(void);
1718c215bd3SPeter Zijlstra extern unsigned long nr_iowait_cpu(int cpu);
17269d25870SArjan van de Ven extern unsigned long this_cpu_load(void);
17369d25870SArjan van de Ven 
17469d25870SArjan van de Ven 
1750f004f5aSPeter Zijlstra extern void calc_global_load(unsigned long ticks);
1765aaa0b7aSPeter Zijlstra extern void update_cpu_load_nohz(void);
1771da177e4SLinus Torvalds 
1787e49fcceSSteven Rostedt extern unsigned long get_parent_ip(unsigned long addr);
1797e49fcceSSteven Rostedt 
180b637a328SPaul E. McKenney extern void dump_cpu_task(int cpu);
181b637a328SPaul E. McKenney 
18243ae34cbSIngo Molnar struct seq_file;
18343ae34cbSIngo Molnar struct cfs_rq;
1844cf86d77SIngo Molnar struct task_group;
18543ae34cbSIngo Molnar #ifdef CONFIG_SCHED_DEBUG
18643ae34cbSIngo Molnar extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
18743ae34cbSIngo Molnar extern void proc_sched_set_task(struct task_struct *p);
18843ae34cbSIngo Molnar extern void
1895cef9ecaSIngo Molnar print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
19043ae34cbSIngo Molnar #endif
1911da177e4SLinus Torvalds 
1924a8342d2SLinus Torvalds /*
1934a8342d2SLinus Torvalds  * Task state bitmask. NOTE! These bits are also
1944a8342d2SLinus Torvalds  * encoded in fs/proc/array.c: get_task_state().
1954a8342d2SLinus Torvalds  *
1964a8342d2SLinus Torvalds  * We have two separate sets of flags: task->state
1974a8342d2SLinus Torvalds  * is about runnability, while task->exit_state is
1984a8342d2SLinus Torvalds  * about the task exiting. Confusing, but this way
1994a8342d2SLinus Torvalds  * modifying one set can't modify the other one by
2004a8342d2SLinus Torvalds  * mistake.
2014a8342d2SLinus Torvalds  */
2021da177e4SLinus Torvalds #define TASK_RUNNING		0
2031da177e4SLinus Torvalds #define TASK_INTERRUPTIBLE	1
2041da177e4SLinus Torvalds #define TASK_UNINTERRUPTIBLE	2
205f021a3c2SMatthew Wilcox #define __TASK_STOPPED		4
206f021a3c2SMatthew Wilcox #define __TASK_TRACED		8
2074a8342d2SLinus Torvalds /* in tsk->exit_state */
2084a8342d2SLinus Torvalds #define EXIT_ZOMBIE		16
2094a8342d2SLinus Torvalds #define EXIT_DEAD		32
2104a8342d2SLinus Torvalds /* in tsk->state again */
211af927232SMike Galbraith #define TASK_DEAD		64
212f021a3c2SMatthew Wilcox #define TASK_WAKEKILL		128
213e9c84311SPeter Zijlstra #define TASK_WAKING		256
214f2530dc7SThomas Gleixner #define TASK_PARKED		512
215f2530dc7SThomas Gleixner #define TASK_STATE_MAX		1024
216f021a3c2SMatthew Wilcox 
217f2530dc7SThomas Gleixner #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
21873342151SPeter Zijlstra 
219e1781538SPeter Zijlstra extern char ___assert_task_state[1 - 2*!!(
220e1781538SPeter Zijlstra 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
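/*
 * The extern declaration above is a build-time assertion: if the number
 * of characters in TASK_STATE_TO_CHAR_STR ever differs from the number
 * of state bits (ilog2(TASK_STATE_MAX) + 1), the !! expression becomes 1
 * and the array size evaluates to -1, which fails to compile.
 */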
221f021a3c2SMatthew Wilcox 
222f021a3c2SMatthew Wilcox /* Convenience macros for the sake of set_task_state */
223f021a3c2SMatthew Wilcox #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
224f021a3c2SMatthew Wilcox #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
225f021a3c2SMatthew Wilcox #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
2261da177e4SLinus Torvalds 
22792a1f4bcSMatthew Wilcox /* Convenience macros for the sake of wake_up */
22892a1f4bcSMatthew Wilcox #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
229f021a3c2SMatthew Wilcox #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
23092a1f4bcSMatthew Wilcox 
23192a1f4bcSMatthew Wilcox /* get_task_state() */
23292a1f4bcSMatthew Wilcox #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
233f021a3c2SMatthew Wilcox 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
23474e37200SOleg Nesterov 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
23592a1f4bcSMatthew Wilcox 
236f021a3c2SMatthew Wilcox #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
237f021a3c2SMatthew Wilcox #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
23892a1f4bcSMatthew Wilcox #define task_is_stopped_or_traced(task)	\
239f021a3c2SMatthew Wilcox 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
24092a1f4bcSMatthew Wilcox #define task_contributes_to_load(task)	\
241e3c8ca83SNathan Lynch 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
242376fede8STejun Heo 				 (task->flags & PF_FROZEN) == 0)
2431da177e4SLinus Torvalds 
2441da177e4SLinus Torvalds #define __set_task_state(tsk, state_value)		\
2451da177e4SLinus Torvalds 	do { (tsk)->state = (state_value); } while (0)
2461da177e4SLinus Torvalds #define set_task_state(tsk, state_value)		\
2471da177e4SLinus Torvalds 	set_mb((tsk)->state, (state_value))
2481da177e4SLinus Torvalds 
249498d0c57SAndrew Morton /*
250498d0c57SAndrew Morton  * set_current_state() includes a barrier so that the write of current->state
251498d0c57SAndrew Morton  * is correctly serialised wrt the caller's subsequent test of whether to
252498d0c57SAndrew Morton  * actually sleep:
253498d0c57SAndrew Morton  *
254498d0c57SAndrew Morton  *	set_current_state(TASK_UNINTERRUPTIBLE);
255498d0c57SAndrew Morton  *	if (do_i_need_to_sleep())
256498d0c57SAndrew Morton  *		schedule();
257498d0c57SAndrew Morton  *
258498d0c57SAndrew Morton  * If the caller does not need such serialisation then use __set_current_state()
259498d0c57SAndrew Morton  */
2601da177e4SLinus Torvalds #define __set_current_state(state_value)			\
2611da177e4SLinus Torvalds 	do { current->state = (state_value); } while (0)
2621da177e4SLinus Torvalds #define set_current_state(state_value)		\
2631da177e4SLinus Torvalds 	set_mb(current->state, (state_value))
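/*
 * A fuller sketch of the canonical sleep pattern described above
 * (illustrative; "condition" stands for whatever the caller waits on):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The waking side sets the condition and then calls wake_up_process()
 * on the sleeping task to move it back to TASK_RUNNING.
 */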
2641da177e4SLinus Torvalds 
2651da177e4SLinus Torvalds /* Task command name length */
2661da177e4SLinus Torvalds #define TASK_COMM_LEN 16
2671da177e4SLinus Torvalds 
2681da177e4SLinus Torvalds #include <linux/spinlock.h>
2691da177e4SLinus Torvalds 
2701da177e4SLinus Torvalds /*
2711da177e4SLinus Torvalds  * This serializes "schedule()" and also protects
2721da177e4SLinus Torvalds  * the run-queue from deletions/modifications (but
2731da177e4SLinus Torvalds  * _adding_ to the beginning of the run-queue has
2741da177e4SLinus Torvalds  * a separate lock).
2751da177e4SLinus Torvalds  */
2761da177e4SLinus Torvalds extern rwlock_t tasklist_lock;
2771da177e4SLinus Torvalds extern spinlock_t mmlist_lock;
2781da177e4SLinus Torvalds 
27936c8b586SIngo Molnar struct task_struct;
2801da177e4SLinus Torvalds 
281db1466b3SPaul E. McKenney #ifdef CONFIG_PROVE_RCU
282db1466b3SPaul E. McKenney extern int lockdep_tasklist_lock_is_held(void);
283db1466b3SPaul E. McKenney #endif /* #ifdef CONFIG_PROVE_RCU */
284db1466b3SPaul E. McKenney 
2851da177e4SLinus Torvalds extern void sched_init(void);
2861da177e4SLinus Torvalds extern void sched_init_smp(void);
2872d07b255SHarvey Harrison extern asmlinkage void schedule_tail(struct task_struct *prev);
28836c8b586SIngo Molnar extern void init_idle(struct task_struct *idle, int cpu);
2891df21055SIngo Molnar extern void init_idle_bootup_task(struct task_struct *idle);
2901da177e4SLinus Torvalds 
29189f19f04SAndrew Morton extern int runqueue_is_locked(int cpu);
292017730c1SIngo Molnar 
2933451d024SFrederic Weisbecker #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
294c1cc017cSAlex Shi extern void nohz_balance_enter_idle(int cpu);
29569e1e811SSuresh Siddha extern void set_cpu_sd_state_idle(void);
29683cd4fe2SVenkatesh Pallipadi extern int get_nohz_timer_target(void);
29746cb4b7cSSiddha, Suresh B #else
298c1cc017cSAlex Shi static inline void nohz_balance_enter_idle(int cpu) { }
299fdaabd80SPeter Zijlstra static inline void set_cpu_sd_state_idle(void) { }
30046cb4b7cSSiddha, Suresh B #endif
3011da177e4SLinus Torvalds 
302e59e2ae2SIngo Molnar /*
30339bc89fdSIngo Molnar  * Only dump TASK_* tasks. (0 for all tasks)
304e59e2ae2SIngo Molnar  */
305e59e2ae2SIngo Molnar extern void show_state_filter(unsigned long state_filter);
306e59e2ae2SIngo Molnar 
307e59e2ae2SIngo Molnar static inline void show_state(void)
308e59e2ae2SIngo Molnar {
30939bc89fdSIngo Molnar 	show_state_filter(0);
310e59e2ae2SIngo Molnar }
311e59e2ae2SIngo Molnar 
3121da177e4SLinus Torvalds extern void show_regs(struct pt_regs *);
3131da177e4SLinus Torvalds 
3141da177e4SLinus Torvalds /*
3151da177e4SLinus Torvalds  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
3161da177e4SLinus Torvalds  * task), SP is the stack pointer of the first frame that should be shown in the back
3171da177e4SLinus Torvalds  * trace (or NULL if the entire call-chain of the task should be shown).
3181da177e4SLinus Torvalds  */
3191da177e4SLinus Torvalds extern void show_stack(struct task_struct *task, unsigned long *sp);
3201da177e4SLinus Torvalds 
3211da177e4SLinus Torvalds void io_schedule(void);
3221da177e4SLinus Torvalds long io_schedule_timeout(long timeout);
3231da177e4SLinus Torvalds 
3241da177e4SLinus Torvalds extern void cpu_init (void);
3251da177e4SLinus Torvalds extern void trap_init(void);
3261da177e4SLinus Torvalds extern void update_process_times(int user);
3271da177e4SLinus Torvalds extern void scheduler_tick(void);
3281da177e4SLinus Torvalds 
32982a1fcb9SIngo Molnar extern void sched_show_task(struct task_struct *p);
33082a1fcb9SIngo Molnar 
33119cc36c0SFrederic Weisbecker #ifdef CONFIG_LOCKUP_DETECTOR
3328446f1d3SIngo Molnar extern void touch_softlockup_watchdog(void);
333d6ad3e28SJason Wessel extern void touch_softlockup_watchdog_sync(void);
33404c9167fSJeremy Fitzhardinge extern void touch_all_softlockup_watchdogs(void);
335332fbdbcSDon Zickus extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
3368d65af78SAlexey Dobriyan 				  void __user *buffer,
337baf48f65SMandeep Singh Baines 				  size_t *lenp, loff_t *ppos);
3389c44bc03SIngo Molnar extern unsigned int  softlockup_panic;
339004417a6SPeter Zijlstra void lockup_detector_init(void);
3408446f1d3SIngo Molnar #else
3418446f1d3SIngo Molnar static inline void touch_softlockup_watchdog(void)
3428446f1d3SIngo Molnar {
3438446f1d3SIngo Molnar }
344d6ad3e28SJason Wessel static inline void touch_softlockup_watchdog_sync(void)
345d6ad3e28SJason Wessel {
346d6ad3e28SJason Wessel }
34704c9167fSJeremy Fitzhardinge static inline void touch_all_softlockup_watchdogs(void)
34804c9167fSJeremy Fitzhardinge {
34904c9167fSJeremy Fitzhardinge }
350004417a6SPeter Zijlstra static inline void lockup_detector_init(void)
351004417a6SPeter Zijlstra {
352004417a6SPeter Zijlstra }
3538446f1d3SIngo Molnar #endif
3548446f1d3SIngo Molnar 
3558b414521SMarcelo Tosatti #ifdef CONFIG_DETECT_HUNG_TASK
3568b414521SMarcelo Tosatti void reset_hung_task_detector(void);
3578b414521SMarcelo Tosatti #else
3588b414521SMarcelo Tosatti static inline void reset_hung_task_detector(void)
3598b414521SMarcelo Tosatti {
3608b414521SMarcelo Tosatti }
3618b414521SMarcelo Tosatti #endif
3628b414521SMarcelo Tosatti 
3631da177e4SLinus Torvalds /* Attach to any functions which should be ignored in wchan output. */
3641da177e4SLinus Torvalds #define __sched		__attribute__((__section__(".sched.text")))
365deaf2227SIngo Molnar 
366deaf2227SIngo Molnar /* Linker adds these: start and end of __sched functions */
367deaf2227SIngo Molnar extern char __sched_text_start[], __sched_text_end[];
368deaf2227SIngo Molnar 
3691da177e4SLinus Torvalds /* Is this address in the __sched functions? */
3701da177e4SLinus Torvalds extern int in_sched_functions(unsigned long addr);
3711da177e4SLinus Torvalds 
3721da177e4SLinus Torvalds #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
373b3c97528SHarvey Harrison extern signed long schedule_timeout(signed long timeout);
37464ed93a2SNishanth Aravamudan extern signed long schedule_timeout_interruptible(signed long timeout);
375294d5cc2SMatthew Wilcox extern signed long schedule_timeout_killable(signed long timeout);
37664ed93a2SNishanth Aravamudan extern signed long schedule_timeout_uninterruptible(signed long timeout);
3771da177e4SLinus Torvalds asmlinkage void schedule(void);
378c5491ea7SThomas Gleixner extern void schedule_preempt_disabled(void);
3791da177e4SLinus Torvalds 
380ab516013SSerge E. Hallyn struct nsproxy;
381acce292cSCedric Le Goater struct user_namespace;
3821da177e4SLinus Torvalds 
383efc1a3b1SDavid Howells #ifdef CONFIG_MMU
384efc1a3b1SDavid Howells extern void arch_pick_mmap_layout(struct mm_struct *mm);
3851da177e4SLinus Torvalds extern unsigned long
3861da177e4SLinus Torvalds arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
3871da177e4SLinus Torvalds 		       unsigned long, unsigned long);
3881da177e4SLinus Torvalds extern unsigned long
3891da177e4SLinus Torvalds arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
3901da177e4SLinus Torvalds 			  unsigned long len, unsigned long pgoff,
3911da177e4SLinus Torvalds 			  unsigned long flags);
392efc1a3b1SDavid Howells #else
393efc1a3b1SDavid Howells static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
394efc1a3b1SDavid Howells #endif
3951da177e4SLinus Torvalds 
396d049f74fSKees Cook #define SUID_DUMP_DISABLE	0	/* No setuid dumping */
397d049f74fSKees Cook #define SUID_DUMP_USER		1	/* Dump as user of process */
398d049f74fSKees Cook #define SUID_DUMP_ROOT		2	/* Dump as root */
399d049f74fSKees Cook 
4006c5d5238SKawai, Hidehiro /* mm flags */
401f8af4da3SHugh Dickins 
4027288e118SOleg Nesterov /* for SUID_DUMP_* above */
4033cb4a0bbSKawai, Hidehiro #define MMF_DUMPABLE_BITS 2
404f8af4da3SHugh Dickins #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
4053cb4a0bbSKawai, Hidehiro 
406942be387SOleg Nesterov extern void set_dumpable(struct mm_struct *mm, int value);
407942be387SOleg Nesterov /*
408942be387SOleg Nesterov  * This returns the actual value of the suid_dumpable flag. For things
408942be387SOleg Nesterov  * This returns the actual value of the suid_dumpable flag. Callers that
409942be387SOleg Nesterov  * use it to check for privilege transitions must test against
410942be387SOleg Nesterov  * SUID_DUMP_USER rather than treat it as a boolean value.
413942be387SOleg Nesterov static inline int __get_dumpable(unsigned long mm_flags)
414942be387SOleg Nesterov {
415942be387SOleg Nesterov 	return mm_flags & MMF_DUMPABLE_MASK;
416942be387SOleg Nesterov }
417942be387SOleg Nesterov 
418942be387SOleg Nesterov static inline int get_dumpable(struct mm_struct *mm)
419942be387SOleg Nesterov {
420942be387SOleg Nesterov 	return __get_dumpable(mm->flags);
421942be387SOleg Nesterov }
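/*
 * Example of the rule above (illustrative only): a privilege-transition
 * check must compare against SUID_DUMP_USER explicitly, since
 * SUID_DUMP_ROOT is also non-zero:
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		treat_as_privilege_boundary = true;
 */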
422942be387SOleg Nesterov 
4233cb4a0bbSKawai, Hidehiro /* coredump filter bits */
4243cb4a0bbSKawai, Hidehiro #define MMF_DUMP_ANON_PRIVATE	2
4253cb4a0bbSKawai, Hidehiro #define MMF_DUMP_ANON_SHARED	3
4263cb4a0bbSKawai, Hidehiro #define MMF_DUMP_MAPPED_PRIVATE	4
4273cb4a0bbSKawai, Hidehiro #define MMF_DUMP_MAPPED_SHARED	5
42882df3973SRoland McGrath #define MMF_DUMP_ELF_HEADERS	6
429e575f111SKOSAKI Motohiro #define MMF_DUMP_HUGETLB_PRIVATE 7
430e575f111SKOSAKI Motohiro #define MMF_DUMP_HUGETLB_SHARED  8
431f8af4da3SHugh Dickins 
4323cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
433e575f111SKOSAKI Motohiro #define MMF_DUMP_FILTER_BITS	7
4343cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_MASK \
4353cb4a0bbSKawai, Hidehiro 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
4363cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_DEFAULT \
437e575f111SKOSAKI Motohiro 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
438656eb2cdSRoland McGrath 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
439656eb2cdSRoland McGrath 
440656eb2cdSRoland McGrath #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
441656eb2cdSRoland McGrath # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
442656eb2cdSRoland McGrath #else
443656eb2cdSRoland McGrath # define MMF_DUMP_MASK_DEFAULT_ELF	0
444656eb2cdSRoland McGrath #endif
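/*
 * Worked out (illustrative), with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
 * disabled: MMF_DUMP_FILTER_DEFAULT = (1 << 2) | (1 << 3) | (1 << 7),
 * i.e. anonymous private, anonymous shared and hugetlb private
 * mappings.  Read back through /proc/<pid>/coredump_filter this is
 * shifted down by MMF_DUMP_FILTER_SHIFT and shows up as 0x23.
 */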
445f8af4da3SHugh Dickins 					/* leave room for more dump flags */
446f8af4da3SHugh Dickins #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
447ba76149fSAndrea Arcangeli #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
448bafb282dSKonstantin Khlebnikov #define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
449f8af4da3SHugh Dickins 
4509f68f672SOleg Nesterov #define MMF_HAS_UPROBES		19	/* has uprobes */
4519f68f672SOleg Nesterov #define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
452f8ac4ec9SOleg Nesterov 
453f8af4da3SHugh Dickins #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
4546c5d5238SKawai, Hidehiro 
4551da177e4SLinus Torvalds struct sighand_struct {
4561da177e4SLinus Torvalds 	atomic_t		count;
4571da177e4SLinus Torvalds 	struct k_sigaction	action[_NSIG];
4581da177e4SLinus Torvalds 	spinlock_t		siglock;
459b8fceee1SDavide Libenzi 	wait_queue_head_t	signalfd_wqh;
4601da177e4SLinus Torvalds };
4611da177e4SLinus Torvalds 
4620e464814SKaiGai Kohei struct pacct_struct {
463f6ec29a4SKaiGai Kohei 	int			ac_flag;
464f6ec29a4SKaiGai Kohei 	long			ac_exitcode;
4650e464814SKaiGai Kohei 	unsigned long		ac_mem;
46677787bfbSKaiGai Kohei 	cputime_t		ac_utime, ac_stime;
46777787bfbSKaiGai Kohei 	unsigned long		ac_minflt, ac_majflt;
4680e464814SKaiGai Kohei };
4690e464814SKaiGai Kohei 
47042c4ab41SStanislaw Gruszka struct cpu_itimer {
47142c4ab41SStanislaw Gruszka 	cputime_t expires;
47242c4ab41SStanislaw Gruszka 	cputime_t incr;
4738356b5f9SStanislaw Gruszka 	u32 error;
4748356b5f9SStanislaw Gruszka 	u32 incr_error;
47542c4ab41SStanislaw Gruszka };
47642c4ab41SStanislaw Gruszka 
477f06febc9SFrank Mayhar /**
478d37f761dSFrederic Weisbecker  * struct cputime - snapshot of system and user cputime
479d37f761dSFrederic Weisbecker  * @utime: time spent in user mode
480d37f761dSFrederic Weisbecker  * @stime: time spent in system mode
481d37f761dSFrederic Weisbecker  *
482d37f761dSFrederic Weisbecker  * Gathers a generic snapshot of user and system time.
483d37f761dSFrederic Weisbecker  */
484d37f761dSFrederic Weisbecker struct cputime {
485d37f761dSFrederic Weisbecker 	cputime_t utime;
486d37f761dSFrederic Weisbecker 	cputime_t stime;
487d37f761dSFrederic Weisbecker };
488d37f761dSFrederic Weisbecker 
489d37f761dSFrederic Weisbecker /**
490f06febc9SFrank Mayhar  * struct task_cputime - collected CPU time counts
491f06febc9SFrank Mayhar  * @utime:		time spent in user mode, in &cputime_t units
492f06febc9SFrank Mayhar  * @stime:		time spent in kernel mode, in &cputime_t units
493f06febc9SFrank Mayhar  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
494f06febc9SFrank Mayhar  *
495d37f761dSFrederic Weisbecker  * This is an extension of struct cputime that includes the total runtime
496d37f761dSFrederic Weisbecker  * spent by the task from the scheduler point of view.
497d37f761dSFrederic Weisbecker  *
498d37f761dSFrederic Weisbecker  * As a result, this structure groups together three kinds of CPU time
499d37f761dSFrederic Weisbecker  * that are tracked for threads and thread groups.  Most things considering
500f06febc9SFrank Mayhar  * CPU time want to group these counts together and treat all three
501f06febc9SFrank Mayhar  * of them in parallel.
502f06febc9SFrank Mayhar  */
503f06febc9SFrank Mayhar struct task_cputime {
504f06febc9SFrank Mayhar 	cputime_t utime;
505f06febc9SFrank Mayhar 	cputime_t stime;
506f06febc9SFrank Mayhar 	unsigned long long sum_exec_runtime;
507f06febc9SFrank Mayhar };
508f06febc9SFrank Mayhar /* Alternate field names when used to cache expirations. */
509f06febc9SFrank Mayhar #define prof_exp	stime
510f06febc9SFrank Mayhar #define virt_exp	utime
511f06febc9SFrank Mayhar #define sched_exp	sum_exec_runtime
512f06febc9SFrank Mayhar 
5134cd4c1b4SPeter Zijlstra #define INIT_CPUTIME	\
5144cd4c1b4SPeter Zijlstra 	(struct task_cputime) {					\
51564861634SMartin Schwidefsky 		.utime = 0,					\
51664861634SMartin Schwidefsky 		.stime = 0,					\
5174cd4c1b4SPeter Zijlstra 		.sum_exec_runtime = 0,				\
5184cd4c1b4SPeter Zijlstra 	}
5194cd4c1b4SPeter Zijlstra 
520a233f112SPeter Zijlstra #ifdef CONFIG_PREEMPT_COUNT
521a233f112SPeter Zijlstra #define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
522a233f112SPeter Zijlstra #else
523a233f112SPeter Zijlstra #define PREEMPT_DISABLED	PREEMPT_ENABLED
524a233f112SPeter Zijlstra #endif
525a233f112SPeter Zijlstra 
526c99e6efeSPeter Zijlstra /*
527c99e6efeSPeter Zijlstra  * Disable preemption until the scheduler is running.
528c99e6efeSPeter Zijlstra  * Reset by start_kernel()->sched_init()->init_idle().
529d86ee480SPeter Zijlstra  *
530d86ee480SPeter Zijlstra  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
531d86ee480SPeter Zijlstra  * before the scheduler is active -- see should_resched().
532c99e6efeSPeter Zijlstra  */
533a233f112SPeter Zijlstra #define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
534c99e6efeSPeter Zijlstra 
535f06febc9SFrank Mayhar /**
5364cd4c1b4SPeter Zijlstra  * struct thread_group_cputimer - thread group interval timer counts
5374cd4c1b4SPeter Zijlstra  * @cputime:		thread group interval timers.
5384cd4c1b4SPeter Zijlstra  * @running:		non-zero when there are timers running and
5394cd4c1b4SPeter Zijlstra  * 			@cputime receives updates.
5404cd4c1b4SPeter Zijlstra  * @lock:		lock for fields in this struct.
541f06febc9SFrank Mayhar  *
542f06febc9SFrank Mayhar  * This structure contains the version of task_cputime, above, that is
5434cd4c1b4SPeter Zijlstra  * used for thread group CPU timer calculations.
544f06febc9SFrank Mayhar  */
5454cd4c1b4SPeter Zijlstra struct thread_group_cputimer {
5464cd4c1b4SPeter Zijlstra 	struct task_cputime cputime;
5474cd4c1b4SPeter Zijlstra 	int running;
548ee30a7b2SThomas Gleixner 	raw_spinlock_t lock;
549f06febc9SFrank Mayhar };
550f06febc9SFrank Mayhar 
5514714d1d3SBen Blum #include <linux/rwsem.h>
5525091faa4SMike Galbraith struct autogroup;
5535091faa4SMike Galbraith 
5541da177e4SLinus Torvalds /*
555e815f0a8SJonathan Neuschäfer  * NOTE! "signal_struct" does not have its own
5561da177e4SLinus Torvalds  * locking, because a shared signal_struct always
5571da177e4SLinus Torvalds  * implies a shared sighand_struct, so locking
5581da177e4SLinus Torvalds  * sighand_struct is always a proper superset of
5591da177e4SLinus Torvalds  * the locking of signal_struct.
5601da177e4SLinus Torvalds  */
5611da177e4SLinus Torvalds struct signal_struct {
562ea6d290cSOleg Nesterov 	atomic_t		sigcnt;
5631da177e4SLinus Torvalds 	atomic_t		live;
564b3ac022cSOleg Nesterov 	int			nr_threads;
5650c740d0aSOleg Nesterov 	struct list_head	thread_head;
5661da177e4SLinus Torvalds 
5671da177e4SLinus Torvalds 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
5681da177e4SLinus Torvalds 
5691da177e4SLinus Torvalds 	/* current thread group signal load-balancing target: */
57036c8b586SIngo Molnar 	struct task_struct	*curr_target;
5711da177e4SLinus Torvalds 
5721da177e4SLinus Torvalds 	/* shared signal handling: */
5731da177e4SLinus Torvalds 	struct sigpending	shared_pending;
5741da177e4SLinus Torvalds 
5751da177e4SLinus Torvalds 	/* thread group exit support */
5761da177e4SLinus Torvalds 	int			group_exit_code;
5771da177e4SLinus Torvalds 	/* overloaded:
5781da177e4SLinus Torvalds 	 * - notify group_exit_task when ->count is equal to notify_count
5791da177e4SLinus Torvalds 	 * - everyone except group_exit_task is stopped during signal delivery
5801da177e4SLinus Torvalds 	 *   of fatal signals, group_exit_task processes the signal.
5811da177e4SLinus Torvalds 	 */
5821da177e4SLinus Torvalds 	int			notify_count;
58307dd20e0SRichard Kennedy 	struct task_struct	*group_exit_task;
5841da177e4SLinus Torvalds 
5851da177e4SLinus Torvalds 	/* thread group stop support, overloads group_exit_code too */
5861da177e4SLinus Torvalds 	int			group_stop_count;
5871da177e4SLinus Torvalds 	unsigned int		flags; /* see SIGNAL_* flags below */
5881da177e4SLinus Torvalds 
589ebec18a6SLennart Poettering 	/*
590ebec18a6SLennart Poettering 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
591ebec18a6SLennart Poettering 	 * manager, to re-parent orphan (double-forking) child processes
592ebec18a6SLennart Poettering 	 * to this process instead of 'init'. The service manager is
593ebec18a6SLennart Poettering 	 * able to receive SIGCHLD signals and is able to investigate
594ebec18a6SLennart Poettering 	 * the process until it calls wait(). All children of this
595ebec18a6SLennart Poettering 	 * process will inherit a flag if they should look for a
596ebec18a6SLennart Poettering 	 * child_subreaper process at exit.
597ebec18a6SLennart Poettering 	 */
598ebec18a6SLennart Poettering 	unsigned int		is_child_subreaper:1;
599ebec18a6SLennart Poettering 	unsigned int		has_child_subreaper:1;
600ebec18a6SLennart Poettering 
6011da177e4SLinus Torvalds 	/* POSIX.1b Interval Timers */
6025ed67f05SPavel Emelyanov 	int			posix_timer_id;
6031da177e4SLinus Torvalds 	struct list_head	posix_timers;
6041da177e4SLinus Torvalds 
6051da177e4SLinus Torvalds 	/* ITIMER_REAL timer for the process */
6062ff678b8SThomas Gleixner 	struct hrtimer real_timer;
607fea9d175SOleg Nesterov 	struct pid *leader_pid;
6082ff678b8SThomas Gleixner 	ktime_t it_real_incr;
6091da177e4SLinus Torvalds 
61042c4ab41SStanislaw Gruszka 	/*
61142c4ab41SStanislaw Gruszka 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
61242c4ab41SStanislaw Gruszka 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
61342c4ab41SStanislaw Gruszka 	 * values are defined to 0 and 1 respectively
61442c4ab41SStanislaw Gruszka 	 */
61542c4ab41SStanislaw Gruszka 	struct cpu_itimer it[2];
6161da177e4SLinus Torvalds 
617f06febc9SFrank Mayhar 	/*
6184cd4c1b4SPeter Zijlstra 	 * Thread group totals for process CPU timers.
6194cd4c1b4SPeter Zijlstra 	 * See thread_group_cputimer(), et al, for details.
620f06febc9SFrank Mayhar 	 */
6214cd4c1b4SPeter Zijlstra 	struct thread_group_cputimer cputimer;
622f06febc9SFrank Mayhar 
623f06febc9SFrank Mayhar 	/* Earliest-expiration cache. */
624f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
625f06febc9SFrank Mayhar 
626f06febc9SFrank Mayhar 	struct list_head cpu_timers[3];
627f06febc9SFrank Mayhar 
628ab521dc0SEric W. Biederman 	struct pid *tty_old_pgrp;
6291ec320afSCedric Le Goater 
6301da177e4SLinus Torvalds 	/* boolean value for session group leader */
6311da177e4SLinus Torvalds 	int leader;
6321da177e4SLinus Torvalds 
6331da177e4SLinus Torvalds 	struct tty_struct *tty; /* NULL if no tty */
6341da177e4SLinus Torvalds 
6355091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
6365091faa4SMike Galbraith 	struct autogroup *autogroup;
6375091faa4SMike Galbraith #endif
6381da177e4SLinus Torvalds 	/*
6391da177e4SLinus Torvalds 	 * Cumulative resource counters for dead threads in the group,
6401da177e4SLinus Torvalds 	 * and for reaped dead child processes forked by this group.
6411da177e4SLinus Torvalds 	 * Live threads maintain their own counters and add to these
6421da177e4SLinus Torvalds 	 * in __exit_signal, except for the group leader.
6431da177e4SLinus Torvalds 	 */
64432bd671dSPeter Zijlstra 	cputime_t utime, stime, cutime, cstime;
6459ac52315SLaurent Vivier 	cputime_t gtime;
6469ac52315SLaurent Vivier 	cputime_t cgtime;
6479fbc42eaSFrederic Weisbecker #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
648d37f761dSFrederic Weisbecker 	struct cputime prev_cputime;
6490cf55e1eSHidetoshi Seto #endif
6501da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
6511da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
6526eaeeabaSEric Dumazet 	unsigned long inblock, oublock, cinblock, coublock;
6531f10206cSJiri Pirko 	unsigned long maxrss, cmaxrss;
654940389b8SAndrea Righi 	struct task_io_accounting ioac;
6551da177e4SLinus Torvalds 
6561da177e4SLinus Torvalds 	/*
65732bd671dSPeter Zijlstra 	 * Cumulative ns of scheduled CPU time for dead threads in the
65832bd671dSPeter Zijlstra 	 * group, not including a zombie group leader. (This only differs
65932bd671dSPeter Zijlstra 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
66032bd671dSPeter Zijlstra 	 * other than jiffies.)
66132bd671dSPeter Zijlstra 	 */
66232bd671dSPeter Zijlstra 	unsigned long long sum_sched_runtime;
66332bd671dSPeter Zijlstra 
66432bd671dSPeter Zijlstra 	/*
6651da177e4SLinus Torvalds 	 * We don't bother to synchronize most readers of this at all,
6661da177e4SLinus Torvalds 	 * because there is no reader checking a limit that actually needs
6671da177e4SLinus Torvalds 	 * to get both rlim_cur and rlim_max atomically, and either one
6681da177e4SLinus Torvalds 	 * alone is a single word that can safely be read normally.
6691da177e4SLinus Torvalds 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
6701da177e4SLinus Torvalds 	 * protect this instead of the siglock, because they really
6711da177e4SLinus Torvalds 	 * have no need to disable irqs.
6721da177e4SLinus Torvalds 	 */
6731da177e4SLinus Torvalds 	struct rlimit rlim[RLIM_NLIMITS];
6741da177e4SLinus Torvalds 
6750e464814SKaiGai Kohei #ifdef CONFIG_BSD_PROCESS_ACCT
6760e464814SKaiGai Kohei 	struct pacct_struct pacct;	/* per-process accounting information */
6770e464814SKaiGai Kohei #endif
678ad4ecbcbSShailabh Nagar #ifdef CONFIG_TASKSTATS
679ad4ecbcbSShailabh Nagar 	struct taskstats *stats;
680ad4ecbcbSShailabh Nagar #endif
681522ed776SMiloslav Trmac #ifdef CONFIG_AUDIT
682522ed776SMiloslav Trmac 	unsigned audit_tty;
68346e959eaSRichard Guy Briggs 	unsigned audit_tty_log_passwd;
684522ed776SMiloslav Trmac 	struct tty_audit_buf *tty_audit_buf;
685522ed776SMiloslav Trmac #endif
6864714d1d3SBen Blum #ifdef CONFIG_CGROUPS
6874714d1d3SBen Blum 	/*
68877e4ef99STejun Heo 	 * group_rwsem prevents new tasks from entering the threadgroup and
68977e4ef99STejun Heo 	 * member tasks from exiting and, more specifically, setting of
69077e4ef99STejun Heo 	 * PF_EXITING.  fork and exit paths are protected with this rwsem
69177e4ef99STejun Heo 	 * using threadgroup_change_begin/end().  Users which require
69277e4ef99STejun Heo 	 * threadgroup to remain stable should use threadgroup_[un]lock()
69377e4ef99STejun Heo 	 * which also takes care of exec path.  Currently, cgroup is the
69477e4ef99STejun Heo 	 * only user.
6954714d1d3SBen Blum 	 */
696257058aeSTejun Heo 	struct rw_semaphore group_rwsem;
6974714d1d3SBen Blum #endif
69828b83c51SKOSAKI Motohiro 
699e1e12d2fSDavid Rientjes 	oom_flags_t oom_flags;
700a9c58b90SDavid Rientjes 	short oom_score_adj;		/* OOM kill score adjustment */
701a9c58b90SDavid Rientjes 	short oom_score_adj_min;	/* OOM kill score adjustment min value.
702dabb16f6SMandeep Singh Baines 					 * Only settable by CAP_SYS_RESOURCE. */
7039b1bf12dSKOSAKI Motohiro 
7049b1bf12dSKOSAKI Motohiro 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
7059b1bf12dSKOSAKI Motohiro 					 * credential calculations
7069b1bf12dSKOSAKI Motohiro 					 * (notably ptrace) */
7071da177e4SLinus Torvalds };
7081da177e4SLinus Torvalds 
7091da177e4SLinus Torvalds /*
7101da177e4SLinus Torvalds  * Bits in flags field of signal_struct.
7111da177e4SLinus Torvalds  */
7121da177e4SLinus Torvalds #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
713ee77f075SOleg Nesterov #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
714ee77f075SOleg Nesterov #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
715403bad72SOleg Nesterov #define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
716e4420551SOleg Nesterov /*
717e4420551SOleg Nesterov  * Pending notifications to parent.
718e4420551SOleg Nesterov  */
719e4420551SOleg Nesterov #define SIGNAL_CLD_STOPPED	0x00000010
720e4420551SOleg Nesterov #define SIGNAL_CLD_CONTINUED	0x00000020
721e4420551SOleg Nesterov #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
7221da177e4SLinus Torvalds 
723fae5fa44SOleg Nesterov #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
724fae5fa44SOleg Nesterov 
725ed5d2cacSOleg Nesterov /* If true, all threads except ->group_exit_task have pending SIGKILL */
726ed5d2cacSOleg Nesterov static inline int signal_group_exit(const struct signal_struct *sig)
727ed5d2cacSOleg Nesterov {
728ed5d2cacSOleg Nesterov 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
729ed5d2cacSOleg Nesterov 		(sig->group_exit_task != NULL);
730ed5d2cacSOleg Nesterov }
731ed5d2cacSOleg Nesterov 
7321da177e4SLinus Torvalds /*
7331da177e4SLinus Torvalds  * Some day this will be a full-fledged user tracking system..
7341da177e4SLinus Torvalds  */
7351da177e4SLinus Torvalds struct user_struct {
7361da177e4SLinus Torvalds 	atomic_t __count;	/* reference count */
7371da177e4SLinus Torvalds 	atomic_t processes;	/* How many processes does this user have? */
7381da177e4SLinus Torvalds 	atomic_t files;		/* How many open files does this user have? */
7391da177e4SLinus Torvalds 	atomic_t sigpending;	/* How many pending signals does this user have? */
7402d9048e2SAmy Griffis #ifdef CONFIG_INOTIFY_USER
7410eeca283SRobert Love 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
7420eeca283SRobert Love 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
7430eeca283SRobert Love #endif
7444afeff85SEric Paris #ifdef CONFIG_FANOTIFY
7454afeff85SEric Paris 	atomic_t fanotify_listeners;
7464afeff85SEric Paris #endif
7477ef9964eSDavide Libenzi #ifdef CONFIG_EPOLL
74852bd19f7SRobin Holt 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
7497ef9964eSDavide Libenzi #endif
750970a8645SAlexey Dobriyan #ifdef CONFIG_POSIX_MQUEUE
7511da177e4SLinus Torvalds 	/* protected by mq_lock	*/
7521da177e4SLinus Torvalds 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
753970a8645SAlexey Dobriyan #endif
7541da177e4SLinus Torvalds 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
7551da177e4SLinus Torvalds 
7561da177e4SLinus Torvalds #ifdef CONFIG_KEYS
7571da177e4SLinus Torvalds 	struct key *uid_keyring;	/* UID specific keyring */
7581da177e4SLinus Torvalds 	struct key *session_keyring;	/* UID's default session keyring */
7591da177e4SLinus Torvalds #endif
7601da177e4SLinus Torvalds 
7611da177e4SLinus Torvalds 	/* Hash table maintenance information */
762735de223SPavel Emelyanov 	struct hlist_node uidhash_node;
7637b44ab97SEric W. Biederman 	kuid_t uid;
76424e377a8SSrivatsa Vaddagiri 
765cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
766789f90fcSPeter Zijlstra 	atomic_long_t locked_vm;
767789f90fcSPeter Zijlstra #endif
7681da177e4SLinus Torvalds };
7691da177e4SLinus Torvalds 
770eb41d946SKay Sievers extern int uids_sysfs_init(void);
7715cb350baSDhaval Giani 
7727b44ab97SEric W. Biederman extern struct user_struct *find_user(kuid_t);
7731da177e4SLinus Torvalds 
7741da177e4SLinus Torvalds extern struct user_struct root_user;
7751da177e4SLinus Torvalds #define INIT_USER (&root_user)
7761da177e4SLinus Torvalds 
777b6dff3ecSDavid Howells 
7781da177e4SLinus Torvalds struct backing_dev_info;
7791da177e4SLinus Torvalds struct reclaim_state;
7801da177e4SLinus Torvalds 
78152f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
7821da177e4SLinus Torvalds struct sched_info {
7831da177e4SLinus Torvalds 	/* cumulative counters */
7842d72376bSIngo Molnar 	unsigned long pcount;	      /* # of times run on this cpu */
7859c2c4802SKen Chen 	unsigned long long run_delay; /* time spent waiting on a runqueue */
7861da177e4SLinus Torvalds 
7871da177e4SLinus Torvalds 	/* timestamps */
788172ba844SBalbir Singh 	unsigned long long last_arrival,/* when we last ran on a cpu */
7891da177e4SLinus Torvalds 			   last_queued;	/* when we were last queued to run */
7901da177e4SLinus Torvalds };
79152f17b6cSChandra Seetharaman #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
7921da177e4SLinus Torvalds 
793ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT
794ca74e92bSShailabh Nagar struct task_delay_info {
795ca74e92bSShailabh Nagar 	spinlock_t	lock;
796ca74e92bSShailabh Nagar 	unsigned int	flags;	/* Private per-task flags */
797ca74e92bSShailabh Nagar 
798ca74e92bSShailabh Nagar 	/* For each stat XXX, add following, aligned appropriately
799ca74e92bSShailabh Nagar 	 *
800ca74e92bSShailabh Nagar 	 * struct timespec XXX_start, XXX_end;
801ca74e92bSShailabh Nagar 	 * u64 XXX_delay;
802ca74e92bSShailabh Nagar 	 * u32 XXX_count;
803ca74e92bSShailabh Nagar 	 *
804ca74e92bSShailabh Nagar 	 * Atomicity of updates to XXX_delay, XXX_count protected by
805ca74e92bSShailabh Nagar 	 * single lock above (split into XXX_lock if contention is an issue).
806ca74e92bSShailabh Nagar 	 */
8070ff92245SShailabh Nagar 
8080ff92245SShailabh Nagar 	/*
8090ff92245SShailabh Nagar 	 * XXX_count is incremented on every XXX operation, the delay
8100ff92245SShailabh Nagar 	 * associated with the operation is added to XXX_delay.
8110ff92245SShailabh Nagar 	 * XXX_delay contains the accumulated delay time in nanoseconds.
8120ff92245SShailabh Nagar 	 */
8130ff92245SShailabh Nagar 	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
8140ff92245SShailabh Nagar 	u64 blkio_delay;	/* wait for sync block io completion */
8150ff92245SShailabh Nagar 	u64 swapin_delay;	/* wait for swapin block io completion */
8160ff92245SShailabh Nagar 	u32 blkio_count;	/* total count of the number of sync block */
8170ff92245SShailabh Nagar 				/* io operations performed */
8180ff92245SShailabh Nagar 	u32 swapin_count;	/* total count of the number of swapin block */
8190ff92245SShailabh Nagar 				/* io operations performed */
820873b4771SKeika Kobayashi 
821873b4771SKeika Kobayashi 	struct timespec freepages_start, freepages_end;
822873b4771SKeika Kobayashi 	u64 freepages_delay;	/* wait for memory reclaim */
823873b4771SKeika Kobayashi 	u32 freepages_count;	/* total count of memory reclaim */
824ca74e92bSShailabh Nagar };
82552f17b6cSChandra Seetharaman #endif	/* CONFIG_TASK_DELAY_ACCT */
82652f17b6cSChandra Seetharaman 
82752f17b6cSChandra Seetharaman static inline int sched_info_on(void)
82852f17b6cSChandra Seetharaman {
82952f17b6cSChandra Seetharaman #ifdef CONFIG_SCHEDSTATS
83052f17b6cSChandra Seetharaman 	return 1;
83152f17b6cSChandra Seetharaman #elif defined(CONFIG_TASK_DELAY_ACCT)
83252f17b6cSChandra Seetharaman 	extern int delayacct_on;
83352f17b6cSChandra Seetharaman 	return delayacct_on;
83452f17b6cSChandra Seetharaman #else
83552f17b6cSChandra Seetharaman 	return 0;
836ca74e92bSShailabh Nagar #endif
83752f17b6cSChandra Seetharaman }
838ca74e92bSShailabh Nagar 
839d15bcfdbSIngo Molnar enum cpu_idle_type {
840d15bcfdbSIngo Molnar 	CPU_IDLE,
841d15bcfdbSIngo Molnar 	CPU_NOT_IDLE,
842d15bcfdbSIngo Molnar 	CPU_NEWLY_IDLE,
843d15bcfdbSIngo Molnar 	CPU_MAX_IDLE_TYPES
8441da177e4SLinus Torvalds };
8451da177e4SLinus Torvalds 
8461da177e4SLinus Torvalds /*
8471399fa78SNikhil Rao  * Increase resolution of cpu_power calculations
8481399fa78SNikhil Rao  */
8491399fa78SNikhil Rao #define SCHED_POWER_SHIFT	10
8501399fa78SNikhil Rao #define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
8511da177e4SLinus Torvalds 
8521399fa78SNikhil Rao /*
8531399fa78SNikhil Rao  * sched-domains (multiprocessor balancing) declarations:
8541399fa78SNikhil Rao  */
8552dd73a4fSPeter Williams #ifdef CONFIG_SMP
856b5d978e0SPeter Zijlstra #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
857b5d978e0SPeter Zijlstra #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
858b5d978e0SPeter Zijlstra #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
859b5d978e0SPeter Zijlstra #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
860c88d5910SPeter Zijlstra #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
861b5d978e0SPeter Zijlstra #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
862b5d978e0SPeter Zijlstra #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
863b5d978e0SPeter Zijlstra #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
864b5d978e0SPeter Zijlstra #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
865532cb4c4SMichael Neuling #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
866b5d978e0SPeter Zijlstra #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
867e3589f6cSPeter Zijlstra #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
8683a7053b3SMel Gorman #define SD_NUMA			0x4000	/* cross-node balancing */
8695c45bf27SSiddha, Suresh B 
870532cb4c4SMichael Neuling extern int __weak arch_sd_sibiling_asym_packing(void);
871532cb4c4SMichael Neuling 
8721d3504fcSHidetoshi Seto struct sched_domain_attr {
8731d3504fcSHidetoshi Seto 	int relax_domain_level;
8741d3504fcSHidetoshi Seto };
8751d3504fcSHidetoshi Seto 
8761d3504fcSHidetoshi Seto #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
8771d3504fcSHidetoshi Seto 	.relax_domain_level = -1,			\
8781d3504fcSHidetoshi Seto }
8791d3504fcSHidetoshi Seto 
88060495e77SPeter Zijlstra extern int sched_domain_level_max;
88160495e77SPeter Zijlstra 
8825e6521eaSLi Zefan struct sched_group;
8835e6521eaSLi Zefan 
8841da177e4SLinus Torvalds struct sched_domain {
8851da177e4SLinus Torvalds 	/* These fields must be setup */
8861da177e4SLinus Torvalds 	struct sched_domain *parent;	/* top domain must be null terminated */
8871a848870SSiddha, Suresh B 	struct sched_domain *child;	/* bottom domain must be null terminated */
8881da177e4SLinus Torvalds 	struct sched_group *groups;	/* the balancing groups of the domain */
8891da177e4SLinus Torvalds 	unsigned long min_interval;	/* Minimum balance interval ms */
8901da177e4SLinus Torvalds 	unsigned long max_interval;	/* Maximum balance interval ms */
8911da177e4SLinus Torvalds 	unsigned int busy_factor;	/* less balancing by factor if busy */
8921da177e4SLinus Torvalds 	unsigned int imbalance_pct;	/* No balance until over watermark */
8931da177e4SLinus Torvalds 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
8947897986bSNick Piggin 	unsigned int busy_idx;
8957897986bSNick Piggin 	unsigned int idle_idx;
8967897986bSNick Piggin 	unsigned int newidle_idx;
8977897986bSNick Piggin 	unsigned int wake_idx;
898147cbb4bSNick Piggin 	unsigned int forkexec_idx;
899a52bfd73SPeter Zijlstra 	unsigned int smt_gain;
90025f55d9dSVincent Guittot 
90125f55d9dSVincent Guittot 	int nohz_idle;			/* NOHZ IDLE status */
9021da177e4SLinus Torvalds 	int flags;			/* See SD_* */
90360495e77SPeter Zijlstra 	int level;
9041da177e4SLinus Torvalds 
9051da177e4SLinus Torvalds 	/* Runtime fields. */
9061da177e4SLinus Torvalds 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
9071da177e4SLinus Torvalds 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
9081da177e4SLinus Torvalds 	unsigned int nr_balance_failed; /* initialise to 0 */
9091da177e4SLinus Torvalds 
910f48627e6SJason Low 	/* idle_balance() stats */
9119bd721c5SJason Low 	u64 max_newidle_lb_cost;
912f48627e6SJason Low 	unsigned long next_decay_max_lb_cost;
9132398f2c6SPeter Zijlstra 
9141da177e4SLinus Torvalds #ifdef CONFIG_SCHEDSTATS
9151da177e4SLinus Torvalds 	/* load_balance() stats */
916480b9434SKen Chen 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
917480b9434SKen Chen 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
918480b9434SKen Chen 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
919480b9434SKen Chen 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
920480b9434SKen Chen 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
921480b9434SKen Chen 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
922480b9434SKen Chen 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
923480b9434SKen Chen 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
9241da177e4SLinus Torvalds 
9251da177e4SLinus Torvalds 	/* Active load balancing */
926480b9434SKen Chen 	unsigned int alb_count;
927480b9434SKen Chen 	unsigned int alb_failed;
928480b9434SKen Chen 	unsigned int alb_pushed;
9291da177e4SLinus Torvalds 
93068767a0aSNick Piggin 	/* SD_BALANCE_EXEC stats */
931480b9434SKen Chen 	unsigned int sbe_count;
932480b9434SKen Chen 	unsigned int sbe_balanced;
933480b9434SKen Chen 	unsigned int sbe_pushed;
9341da177e4SLinus Torvalds 
93568767a0aSNick Piggin 	/* SD_BALANCE_FORK stats */
936480b9434SKen Chen 	unsigned int sbf_count;
937480b9434SKen Chen 	unsigned int sbf_balanced;
938480b9434SKen Chen 	unsigned int sbf_pushed;
93968767a0aSNick Piggin 
9401da177e4SLinus Torvalds 	/* try_to_wake_up() stats */
941480b9434SKen Chen 	unsigned int ttwu_wake_remote;
942480b9434SKen Chen 	unsigned int ttwu_move_affine;
943480b9434SKen Chen 	unsigned int ttwu_move_balance;
9441da177e4SLinus Torvalds #endif
945a5d8c348SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
946a5d8c348SIngo Molnar 	char *name;
947a5d8c348SIngo Molnar #endif
948dce840a0SPeter Zijlstra 	union {
949dce840a0SPeter Zijlstra 		void *private;		/* used during construction */
950dce840a0SPeter Zijlstra 		struct rcu_head rcu;	/* used during destruction */
951dce840a0SPeter Zijlstra 	};
9526c99e9adSRusty Russell 
953669c55e9SPeter Zijlstra 	unsigned int span_weight;
9544200efd9SIngo Molnar 	/*
9554200efd9SIngo Molnar 	 * Span of all CPUs in this domain.
9564200efd9SIngo Molnar 	 *
9574200efd9SIngo Molnar 	 * NOTE: this field is variable length. (Allocated dynamically
9584200efd9SIngo Molnar 	 * by attaching extra space to the end of the structure,
9594200efd9SIngo Molnar 	 * depending on how many CPUs the kernel has booted up with)
9604200efd9SIngo Molnar 	 */
9614200efd9SIngo Molnar 	unsigned long span[0];
9621da177e4SLinus Torvalds };
9631da177e4SLinus Torvalds 
964758b2cdcSRusty Russell static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
965758b2cdcSRusty Russell {
9666c99e9adSRusty Russell 	return to_cpumask(sd->span);
967758b2cdcSRusty Russell }
968758b2cdcSRusty Russell 
969acc3f5d7SRusty Russell extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
9701d3504fcSHidetoshi Seto 				    struct sched_domain_attr *dattr_new);
971029190c5SPaul Jackson 
972acc3f5d7SRusty Russell /* Allocate an array of sched domains, for partition_sched_domains(). */
973acc3f5d7SRusty Russell cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
974acc3f5d7SRusty Russell void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
975acc3f5d7SRusty Russell 
97639be3501SPeter Zijlstra bool cpus_share_cache(int this_cpu, int that_cpu);
97739be3501SPeter Zijlstra 
9781b427c15SIngo Molnar #else /* CONFIG_SMP */
9791da177e4SLinus Torvalds 
9801b427c15SIngo Molnar struct sched_domain_attr;
9811b427c15SIngo Molnar 
9821b427c15SIngo Molnar static inline void
983acc3f5d7SRusty Russell partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
9841b427c15SIngo Molnar 			struct sched_domain_attr *dattr_new)
985d02c7a8cSCon Kolivas {
986d02c7a8cSCon Kolivas }
98739be3501SPeter Zijlstra 
98839be3501SPeter Zijlstra static inline bool cpus_share_cache(int this_cpu, int that_cpu)
98939be3501SPeter Zijlstra {
99039be3501SPeter Zijlstra 	return true;
99139be3501SPeter Zijlstra }
99239be3501SPeter Zijlstra 
9931b427c15SIngo Molnar #endif	/* !CONFIG_SMP */
9941da177e4SLinus Torvalds 
99547fe38fcSPeter Zijlstra 
9961da177e4SLinus Torvalds struct io_context;			/* See blkdev.h */
9971da177e4SLinus Torvalds 
9981da177e4SLinus Torvalds 
999383f2835SChen, Kenneth W #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
100036c8b586SIngo Molnar extern void prefetch_stack(struct task_struct *t);
1001383f2835SChen, Kenneth W #else
1002383f2835SChen, Kenneth W static inline void prefetch_stack(struct task_struct *t) { }
1003383f2835SChen, Kenneth W #endif
10041da177e4SLinus Torvalds 
10051da177e4SLinus Torvalds struct audit_context;		/* See audit.c */
10061da177e4SLinus Torvalds struct mempolicy;
1007b92ce558SJens Axboe struct pipe_inode_info;
10084865ecf1SSerge E. Hallyn struct uts_namespace;
10091da177e4SLinus Torvalds 
101020b8a59fSIngo Molnar struct load_weight {
10119dbdb155SPeter Zijlstra 	unsigned long weight;
10129dbdb155SPeter Zijlstra 	u32 inv_weight;
101320b8a59fSIngo Molnar };
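/*
 * Illustrative sketch (not part of this header): inv_weight caches an
 * approximation of 2^32 / weight, so dividing by the weight can be done
 * with a multiply and a shift on hot paths, roughly:
 *
 *	scaled = (delta * lw->inv_weight) >> 32;
 *
 * which approximates delta / lw->weight; the exact rounding and overflow
 * handling is in kernel/sched/fair.c.
 */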
101420b8a59fSIngo Molnar 
10159d85f21cSPaul Turner struct sched_avg {
10169d85f21cSPaul Turner 	/*
10179d85f21cSPaul Turner 	 * These sums represent an infinite geometric series and so are bounded
1018239003eaSKamalesh Babulal 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
10199d85f21cSPaul Turner 	 * choices of y < 1-2^(-32)*1024.
10209d85f21cSPaul Turner 	 */
10219d85f21cSPaul Turner 	u32 runnable_avg_sum, runnable_avg_period;
10229d85f21cSPaul Turner 	u64 last_runnable_update;
10239ee474f5SPaul Turner 	s64 decay_count;
10242dac754eSPaul Turner 	unsigned long load_avg_contrib;
10259d85f21cSPaul Turner };
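/*
 * Sketch of the bound referenced above: with at most 1024 contributed per
 * period and decay factor y, the running sum is at most
 *
 *	1024 * (1 + y + y^2 + ...) = 1024 / (1 - y)
 *
 * which fits in a u32 whenever 1024 / (1 - y) < 2^32, i.e. for all
 * y < 1 - 1024 * 2^-32, matching the comment in the struct.
 */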
10269d85f21cSPaul Turner 
102794c18227SIngo Molnar #ifdef CONFIG_SCHEDSTATS
102841acab88SLucas De Marchi struct sched_statistics {
102994c18227SIngo Molnar 	u64			wait_start;
103094c18227SIngo Molnar 	u64			wait_max;
10316d082592SArjan van de Ven 	u64			wait_count;
10326d082592SArjan van de Ven 	u64			wait_sum;
10338f0dfc34SArjan van de Ven 	u64			iowait_count;
10348f0dfc34SArjan van de Ven 	u64			iowait_sum;
103594c18227SIngo Molnar 
103694c18227SIngo Molnar 	u64			sleep_start;
103720b8a59fSIngo Molnar 	u64			sleep_max;
103894c18227SIngo Molnar 	s64			sum_sleep_runtime;
103994c18227SIngo Molnar 
104094c18227SIngo Molnar 	u64			block_start;
104120b8a59fSIngo Molnar 	u64			block_max;
104220b8a59fSIngo Molnar 	u64			exec_max;
1043eba1ed4bSIngo Molnar 	u64			slice_max;
1044cc367732SIngo Molnar 
1045cc367732SIngo Molnar 	u64			nr_migrations_cold;
1046cc367732SIngo Molnar 	u64			nr_failed_migrations_affine;
1047cc367732SIngo Molnar 	u64			nr_failed_migrations_running;
1048cc367732SIngo Molnar 	u64			nr_failed_migrations_hot;
1049cc367732SIngo Molnar 	u64			nr_forced_migrations;
1050cc367732SIngo Molnar 
1051cc367732SIngo Molnar 	u64			nr_wakeups;
1052cc367732SIngo Molnar 	u64			nr_wakeups_sync;
1053cc367732SIngo Molnar 	u64			nr_wakeups_migrate;
1054cc367732SIngo Molnar 	u64			nr_wakeups_local;
1055cc367732SIngo Molnar 	u64			nr_wakeups_remote;
1056cc367732SIngo Molnar 	u64			nr_wakeups_affine;
1057cc367732SIngo Molnar 	u64			nr_wakeups_affine_attempts;
1058cc367732SIngo Molnar 	u64			nr_wakeups_passive;
1059cc367732SIngo Molnar 	u64			nr_wakeups_idle;
106041acab88SLucas De Marchi };
106141acab88SLucas De Marchi #endif
106241acab88SLucas De Marchi 
106341acab88SLucas De Marchi struct sched_entity {
106441acab88SLucas De Marchi 	struct load_weight	load;		/* for load-balancing */
106541acab88SLucas De Marchi 	struct rb_node		run_node;
106641acab88SLucas De Marchi 	struct list_head	group_node;
106741acab88SLucas De Marchi 	unsigned int		on_rq;
106841acab88SLucas De Marchi 
106941acab88SLucas De Marchi 	u64			exec_start;
107041acab88SLucas De Marchi 	u64			sum_exec_runtime;
107141acab88SLucas De Marchi 	u64			vruntime;
107241acab88SLucas De Marchi 	u64			prev_sum_exec_runtime;
107341acab88SLucas De Marchi 
107441acab88SLucas De Marchi 	u64			nr_migrations;
107541acab88SLucas De Marchi 
107641acab88SLucas De Marchi #ifdef CONFIG_SCHEDSTATS
107741acab88SLucas De Marchi 	struct sched_statistics statistics;
107894c18227SIngo Molnar #endif
107994c18227SIngo Molnar 
108020b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
1081*fed14d45SPeter Zijlstra 	int			depth;
108220b8a59fSIngo Molnar 	struct sched_entity	*parent;
108320b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
108420b8a59fSIngo Molnar 	struct cfs_rq		*cfs_rq;
108520b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
108620b8a59fSIngo Molnar 	struct cfs_rq		*my_q;
108720b8a59fSIngo Molnar #endif
10888bd75c77SClark Williams 
1089141965c7SAlex Shi #ifdef CONFIG_SMP
1090f4e26b12SPaul Turner 	/* Per-entity load-tracking */
10919d85f21cSPaul Turner 	struct sched_avg	avg;
10929d85f21cSPaul Turner #endif
109320b8a59fSIngo Molnar };
109470b97a7fSIngo Molnar 
1095fa717060SPeter Zijlstra struct sched_rt_entity {
1096fa717060SPeter Zijlstra 	struct list_head run_list;
109778f2c7dbSPeter Zijlstra 	unsigned long timeout;
109857d2aa00SYing Xue 	unsigned long watchdog_stamp;
1099bee367edSRichard Kennedy 	unsigned int time_slice;
11006f505b16SPeter Zijlstra 
110158d6c2d7SPeter Zijlstra 	struct sched_rt_entity *back;
1102052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
11036f505b16SPeter Zijlstra 	struct sched_rt_entity	*parent;
11046f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
11056f505b16SPeter Zijlstra 	struct rt_rq		*rt_rq;
11066f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
11076f505b16SPeter Zijlstra 	struct rt_rq		*my_q;
11086f505b16SPeter Zijlstra #endif
1109fa717060SPeter Zijlstra };
1110fa717060SPeter Zijlstra 
1111aab03e05SDario Faggioli struct sched_dl_entity {
1112aab03e05SDario Faggioli 	struct rb_node	rb_node;
1113aab03e05SDario Faggioli 
1114aab03e05SDario Faggioli 	/*
1115aab03e05SDario Faggioli 	 * Original scheduling parameters. Copied here from sched_attr
1116aab03e05SDario Faggioli 	 * during sched_setscheduler2(), they will remain the same until
1117aab03e05SDario Faggioli 	 * the next sched_setscheduler2().
1118aab03e05SDario Faggioli 	 */
1119aab03e05SDario Faggioli 	u64 dl_runtime;		/* maximum runtime for each instance	*/
1120aab03e05SDario Faggioli 	u64 dl_deadline;	/* relative deadline of each instance	*/
1121755378a4SHarald Gustafsson 	u64 dl_period;		/* separation of two instances (period) */
1122332ac17eSDario Faggioli 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1123aab03e05SDario Faggioli 
1124aab03e05SDario Faggioli 	/*
1125aab03e05SDario Faggioli 	 * Actual scheduling parameters. Initialized with the values above,
1126aab03e05SDario Faggioli 	 * they are continuously updated during task execution. Note that
1127aab03e05SDario Faggioli 	 * the remaining runtime could be < 0 in case we are in overrun.
1128aab03e05SDario Faggioli 	 */
1129aab03e05SDario Faggioli 	s64 runtime;		/* remaining runtime for this instance	*/
1130aab03e05SDario Faggioli 	u64 deadline;		/* absolute deadline for this instance	*/
1131aab03e05SDario Faggioli 	unsigned int flags;	/* specifying the scheduler behaviour	*/
1132aab03e05SDario Faggioli 
1133aab03e05SDario Faggioli 	/*
1134aab03e05SDario Faggioli 	 * Some bool flags:
1135aab03e05SDario Faggioli 	 *
1136aab03e05SDario Faggioli 	 * @dl_throttled tells if we exhausted the runtime. If so, the
1137aab03e05SDario Faggioli 	 * task has to wait for a replenishment to be performed at the
1138aab03e05SDario Faggioli 	 * next firing of dl_timer.
1139aab03e05SDario Faggioli 	 *
1140aab03e05SDario Faggioli 	 * @dl_new tells if a new instance arrived. If so we must
1141aab03e05SDario Faggioli 	 * start executing it with full runtime and reset its absolute
1142aab03e05SDario Faggioli 	 * deadline;
11432d3d891dSDario Faggioli 	 *
11442d3d891dSDario Faggioli 	 * @dl_boosted tells if we are boosted due to deadline inheritance (DI). If so we are
11452d3d891dSDario Faggioli 	 * outside bandwidth enforcement mechanism (but only until we
11462d3d891dSDario Faggioli 	 * exit the critical section).
1147aab03e05SDario Faggioli 	 */
11482d3d891dSDario Faggioli 	int dl_throttled, dl_new, dl_boosted;
1149aab03e05SDario Faggioli 
1150aab03e05SDario Faggioli 	/*
1151aab03e05SDario Faggioli 	 * Bandwidth enforcement timer. Each -deadline task has its
1152aab03e05SDario Faggioli 	 * own bandwidth to be enforced, thus we need one timer per task.
1153aab03e05SDario Faggioli 	 */
1154aab03e05SDario Faggioli 	struct hrtimer dl_timer;
1155aab03e05SDario Faggioli };
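/*
 * Worked example (illustrative): a -deadline task declaring
 * dl_runtime = 10ms and dl_deadline = dl_period = 100ms reserves
 * dl_bw = 10ms / 100ms = 10% of a CPU; admission control sums these
 * fractions and rejects task sets that would exceed the configured limit.
 */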
11568bd75c77SClark Williams 
115786848966SPaul E. McKenney struct rcu_node;
115886848966SPaul E. McKenney 
11598dc85d54SPeter Zijlstra enum perf_event_task_context {
11608dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
11618dc85d54SPeter Zijlstra 	perf_hw_context = 0,
116289a1e187SPeter Zijlstra 	perf_sw_context,
11638dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
11648dc85d54SPeter Zijlstra };
11658dc85d54SPeter Zijlstra 
11661da177e4SLinus Torvalds struct task_struct {
11671da177e4SLinus Torvalds 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1168f7e4217bSRoman Zippel 	void *stack;
11691da177e4SLinus Torvalds 	atomic_t usage;
117097dc32cdSWilliam Cohen 	unsigned int flags;	/* per process flags, defined below */
117197dc32cdSWilliam Cohen 	unsigned int ptrace;
11721da177e4SLinus Torvalds 
11732dd73a4fSPeter Williams #ifdef CONFIG_SMP
1174fa14ff4aSPeter Zijlstra 	struct llist_node wake_entry;
11753ca7a440SPeter Zijlstra 	int on_cpu;
117662470419SMichael Wang 	struct task_struct *last_wakee;
117762470419SMichael Wang 	unsigned long wakee_flips;
117862470419SMichael Wang 	unsigned long wakee_flip_decay_ts;
1179ac66f547SPeter Zijlstra 
1180ac66f547SPeter Zijlstra 	int wake_cpu;
11814866cde0SNick Piggin #endif
1182fd2f4419SPeter Zijlstra 	int on_rq;
118350e645a8SIngo Molnar 
1184b29739f9SIngo Molnar 	int prio, static_prio, normal_prio;
1185c7aceabaSRichard Kennedy 	unsigned int rt_priority;
11865522d5d5SIngo Molnar 	const struct sched_class *sched_class;
118720b8a59fSIngo Molnar 	struct sched_entity se;
1188fa717060SPeter Zijlstra 	struct sched_rt_entity rt;
11898323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
11908323f26cSPeter Zijlstra 	struct task_group *sched_task_group;
11918323f26cSPeter Zijlstra #endif
1192aab03e05SDario Faggioli 	struct sched_dl_entity dl;
11931da177e4SLinus Torvalds 
1194e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
1195e107be36SAvi Kivity 	/* list of struct preempt_notifier: */
1196e107be36SAvi Kivity 	struct hlist_head preempt_notifiers;
1197e107be36SAvi Kivity #endif
1198e107be36SAvi Kivity 
11996c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
12002056a782SJens Axboe 	unsigned int btrace_seq;
12016c5c9341SAlexey Dobriyan #endif
12021da177e4SLinus Torvalds 
120397dc32cdSWilliam Cohen 	unsigned int policy;
120429baa747SPeter Zijlstra 	int nr_cpus_allowed;
12051da177e4SLinus Torvalds 	cpumask_t cpus_allowed;
12061da177e4SLinus Torvalds 
1207a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1208e260be67SPaul E. McKenney 	int rcu_read_lock_nesting;
1209f41d911fSPaul E. McKenney 	char rcu_read_unlock_special;
1210f41d911fSPaul E. McKenney 	struct list_head rcu_node_entry;
1211a57eb940SPaul E. McKenney #endif /* #ifdef CONFIG_PREEMPT_RCU */
1212a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1213a57eb940SPaul E. McKenney 	struct rcu_node *rcu_blocked_node;
1214f41d911fSPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
121524278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
121624278d14SPaul E. McKenney 	struct rt_mutex *rcu_boost_mutex;
121724278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1218e260be67SPaul E. McKenney 
121952f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
12201da177e4SLinus Torvalds 	struct sched_info sched_info;
12211da177e4SLinus Torvalds #endif
12221da177e4SLinus Torvalds 
12231da177e4SLinus Torvalds 	struct list_head tasks;
1224806c09a7SDario Faggioli #ifdef CONFIG_SMP
1225917b627dSGregory Haskins 	struct plist_node pushable_tasks;
12261baca4ceSJuri Lelli 	struct rb_node pushable_dl_tasks;
1227806c09a7SDario Faggioli #endif
12281da177e4SLinus Torvalds 
12291da177e4SLinus Torvalds 	struct mm_struct *mm, *active_mm;
12304471a675SJiri Kosina #ifdef CONFIG_COMPAT_BRK
12314471a675SJiri Kosina 	unsigned brk_randomized:1;
12324471a675SJiri Kosina #endif
123334e55232SKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
123434e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat	rss_stat;
123534e55232SKAMEZAWA Hiroyuki #endif
12361da177e4SLinus Torvalds /* task state */
123797dc32cdSWilliam Cohen 	int exit_state;
12381da177e4SLinus Torvalds 	int exit_code, exit_signal;
12391da177e4SLinus Torvalds 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1240a8f072c1STejun Heo 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
12419b89f6baSAndrei Epure 
12429b89f6baSAndrei Epure 	/* Used for emulating ABI behavior of previous Linux versions */
124397dc32cdSWilliam Cohen 	unsigned int personality;
12449b89f6baSAndrei Epure 
1245f9ce1f1cSKentaro Takeda 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1246f9ce1f1cSKentaro Takeda 				 * execve */
12478f0dfc34SArjan van de Ven 	unsigned in_iowait:1;
12488f0dfc34SArjan van de Ven 
1249259e5e6cSAndy Lutomirski 	/* task may not gain privileges */
1250259e5e6cSAndy Lutomirski 	unsigned no_new_privs:1;
1251ca94c442SLennart Poettering 
1252ca94c442SLennart Poettering 	/* Revert to default priority/policy when forking */
1253ca94c442SLennart Poettering 	unsigned sched_reset_on_fork:1;
1254a8e4f2eaSPeter Zijlstra 	unsigned sched_contributes_to_load:1;
1255ca94c442SLennart Poettering 
12561da177e4SLinus Torvalds 	pid_t pid;
12571da177e4SLinus Torvalds 	pid_t tgid;
12580a425405SArjan van de Ven 
12591314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
12600a425405SArjan van de Ven 	/* Canary value for the -fstack-protector gcc feature */
12610a425405SArjan van de Ven 	unsigned long stack_canary;
12621314562aSHiroshi Shimamoto #endif
12631da177e4SLinus Torvalds 	/*
12641da177e4SLinus Torvalds 	 * pointers to (original) parent process, youngest child, younger sibling,
12651da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
1266f470021aSRoland McGrath 	 * p->real_parent->pid)
12671da177e4SLinus Torvalds 	 */
1268abd63bc3SKees Cook 	struct task_struct __rcu *real_parent; /* real parent process */
1269abd63bc3SKees Cook 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
12701da177e4SLinus Torvalds 	/*
1271f470021aSRoland McGrath 	 * children/sibling forms the list of my natural children
12721da177e4SLinus Torvalds 	 */
12731da177e4SLinus Torvalds 	struct list_head children;	/* list of my children */
12741da177e4SLinus Torvalds 	struct list_head sibling;	/* linkage in my parent's children list */
12751da177e4SLinus Torvalds 	struct task_struct *group_leader;	/* threadgroup leader */
12761da177e4SLinus Torvalds 
1277f470021aSRoland McGrath 	/*
1278f470021aSRoland McGrath 	 * ptraced is the list of tasks this task is using ptrace on.
1279f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
1280f470021aSRoland McGrath 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1281f470021aSRoland McGrath 	 */
1282f470021aSRoland McGrath 	struct list_head ptraced;
1283f470021aSRoland McGrath 	struct list_head ptrace_entry;
1284f470021aSRoland McGrath 
12851da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
128692476d7fSEric W. Biederman 	struct pid_link pids[PIDTYPE_MAX];
128747e65328SOleg Nesterov 	struct list_head thread_group;
12880c740d0aSOleg Nesterov 	struct list_head thread_node;
12891da177e4SLinus Torvalds 
12901da177e4SLinus Torvalds 	struct completion *vfork_done;		/* for vfork() */
12911da177e4SLinus Torvalds 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
12921da177e4SLinus Torvalds 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
12931da177e4SLinus Torvalds 
1294c66f08beSMichael Neuling 	cputime_t utime, stime, utimescaled, stimescaled;
12959ac52315SLaurent Vivier 	cputime_t gtime;
12969fbc42eaSFrederic Weisbecker #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1297d37f761dSFrederic Weisbecker 	struct cputime prev_cputime;
1298d99ca3b9SHidetoshi Seto #endif
12996a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
13006a61671bSFrederic Weisbecker 	seqlock_t vtime_seqlock;
13016a61671bSFrederic Weisbecker 	unsigned long long vtime_snap;
13026a61671bSFrederic Weisbecker 	enum {
13036a61671bSFrederic Weisbecker 		VTIME_SLEEPING = 0,
13046a61671bSFrederic Weisbecker 		VTIME_USER,
13056a61671bSFrederic Weisbecker 		VTIME_SYS,
13066a61671bSFrederic Weisbecker 	} vtime_snap_whence;
13076a61671bSFrederic Weisbecker #endif
13081da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw; /* context switch counts */
1309924b42d5STomas Janousek 	struct timespec start_time; 		/* monotonic time */
1310924b42d5STomas Janousek 	struct timespec real_start_time;	/* boot based time */
13111da177e4SLinus Torvalds /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
13121da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt;
13131da177e4SLinus Torvalds 
1314f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
13151da177e4SLinus Torvalds 	struct list_head cpu_timers[3];
13161da177e4SLinus Torvalds 
13171da177e4SLinus Torvalds /* process credentials */
13181b0ba1c9SArnd Bergmann 	const struct cred __rcu *real_cred; /* objective and real subjective task
13193b11a1deSDavid Howells 					 * credentials (COW) */
13201b0ba1c9SArnd Bergmann 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
13213b11a1deSDavid Howells 					 * credentials (COW) */
132236772092SPaolo 'Blaisorblade' Giarrusso 	char comm[TASK_COMM_LEN]; /* executable name excluding path
132336772092SPaolo 'Blaisorblade' Giarrusso 				     - access with [gs]et_task_comm (which locks
132436772092SPaolo 'Blaisorblade' Giarrusso 				       it with task_lock())
1325221af7f8SLinus Torvalds 				     - initialized normally by setup_new_exec */
13261da177e4SLinus Torvalds /* file system info */
13271da177e4SLinus Torvalds 	int link_count, total_link_count;
13283d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
13291da177e4SLinus Torvalds /* ipc stuff */
13301da177e4SLinus Torvalds 	struct sysv_sem sysvsem;
13313d5b6fccSAlexey Dobriyan #endif
1332e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
133382a1fcb9SIngo Molnar /* hung task detection */
133482a1fcb9SIngo Molnar 	unsigned long last_switch_count;
133582a1fcb9SIngo Molnar #endif
13361da177e4SLinus Torvalds /* CPU-specific state of this task */
13371da177e4SLinus Torvalds 	struct thread_struct thread;
13381da177e4SLinus Torvalds /* filesystem information */
13391da177e4SLinus Torvalds 	struct fs_struct *fs;
13401da177e4SLinus Torvalds /* open file information */
13411da177e4SLinus Torvalds 	struct files_struct *files;
13421651e14eSSerge E. Hallyn /* namespaces */
1343ab516013SSerge E. Hallyn 	struct nsproxy *nsproxy;
13441da177e4SLinus Torvalds /* signal handlers */
13451da177e4SLinus Torvalds 	struct signal_struct *signal;
13461da177e4SLinus Torvalds 	struct sighand_struct *sighand;
13471da177e4SLinus Torvalds 
13481da177e4SLinus Torvalds 	sigset_t blocked, real_blocked;
1349f3de272bSRoland McGrath 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
13501da177e4SLinus Torvalds 	struct sigpending pending;
13511da177e4SLinus Torvalds 
13521da177e4SLinus Torvalds 	unsigned long sas_ss_sp;
13531da177e4SLinus Torvalds 	size_t sas_ss_size;
13541da177e4SLinus Torvalds 	int (*notifier)(void *priv);
13551da177e4SLinus Torvalds 	void *notifier_data;
13561da177e4SLinus Torvalds 	sigset_t *notifier_mask;
135767d12145SAl Viro 	struct callback_head *task_works;
1358e73f8959SOleg Nesterov 
13591da177e4SLinus Torvalds 	struct audit_context *audit_context;
1360bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
1361e1760bd5SEric W. Biederman 	kuid_t loginuid;
13624746ec5bSEric Paris 	unsigned int sessionid;
1363bfef93a5SAl Viro #endif
1364932ecebbSWill Drewry 	struct seccomp seccomp;
13651da177e4SLinus Torvalds 
13661da177e4SLinus Torvalds /* Thread group tracking */
13671da177e4SLinus Torvalds    	u32 parent_exec_id;
13681da177e4SLinus Torvalds    	u32 self_exec_id;
136958568d2aSMiao Xie /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
137058568d2aSMiao Xie  * mempolicy */
13711da177e4SLinus Torvalds 	spinlock_t alloc_lock;
13721da177e4SLinus Torvalds 
1373b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
13741d615482SThomas Gleixner 	raw_spinlock_t pi_lock;
1375b29739f9SIngo Molnar 
137623f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
137723f78d4aSIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task */
1378fb00aca4SPeter Zijlstra 	struct rb_root pi_waiters;
1379fb00aca4SPeter Zijlstra 	struct rb_node *pi_waiters_leftmost;
138023f78d4aSIngo Molnar 	/* Deadlock detection and priority inheritance handling */
138123f78d4aSIngo Molnar 	struct rt_mutex_waiter *pi_blocked_on;
13822d3d891dSDario Faggioli 	/* Top pi_waiters task */
13832d3d891dSDario Faggioli 	struct task_struct *pi_top_task;
138423f78d4aSIngo Molnar #endif
138523f78d4aSIngo Molnar 
1386408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
1387408894eeSIngo Molnar 	/* mutex deadlock detection */
1388408894eeSIngo Molnar 	struct mutex_waiter *blocked_on;
1389408894eeSIngo Molnar #endif
1390de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
1391de30a2b3SIngo Molnar 	unsigned int irq_events;
1392de30a2b3SIngo Molnar 	unsigned long hardirq_enable_ip;
1393de30a2b3SIngo Molnar 	unsigned long hardirq_disable_ip;
1394fa1452e8SHiroshi Shimamoto 	unsigned int hardirq_enable_event;
1395de30a2b3SIngo Molnar 	unsigned int hardirq_disable_event;
1396fa1452e8SHiroshi Shimamoto 	int hardirqs_enabled;
1397de30a2b3SIngo Molnar 	int hardirq_context;
1398fa1452e8SHiroshi Shimamoto 	unsigned long softirq_disable_ip;
1399fa1452e8SHiroshi Shimamoto 	unsigned long softirq_enable_ip;
1400fa1452e8SHiroshi Shimamoto 	unsigned int softirq_disable_event;
1401fa1452e8SHiroshi Shimamoto 	unsigned int softirq_enable_event;
1402fa1452e8SHiroshi Shimamoto 	int softirqs_enabled;
1403de30a2b3SIngo Molnar 	int softirq_context;
1404de30a2b3SIngo Molnar #endif
1405fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
1406bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
1407fbb9ce95SIngo Molnar 	u64 curr_chain_key;
1408fbb9ce95SIngo Molnar 	int lockdep_depth;
1409fbb9ce95SIngo Molnar 	unsigned int lockdep_recursion;
1410c7aceabaSRichard Kennedy 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1411cf40bd16SNick Piggin 	gfp_t lockdep_reclaim_gfp;
1412fbb9ce95SIngo Molnar #endif
1413408894eeSIngo Molnar 
14141da177e4SLinus Torvalds /* journalling filesystem info */
14151da177e4SLinus Torvalds 	void *journal_info;
14161da177e4SLinus Torvalds 
1417d89d8796SNeil Brown /* stacked block device info */
1418bddd87c7SAkinobu Mita 	struct bio_list *bio_list;
1419d89d8796SNeil Brown 
142073c10101SJens Axboe #ifdef CONFIG_BLOCK
142173c10101SJens Axboe /* stack plugging */
142273c10101SJens Axboe 	struct blk_plug *plug;
142373c10101SJens Axboe #endif
142473c10101SJens Axboe 
14251da177e4SLinus Torvalds /* VM state */
14261da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state;
14271da177e4SLinus Torvalds 
14281da177e4SLinus Torvalds 	struct backing_dev_info *backing_dev_info;
14291da177e4SLinus Torvalds 
14301da177e4SLinus Torvalds 	struct io_context *io_context;
14311da177e4SLinus Torvalds 
14321da177e4SLinus Torvalds 	unsigned long ptrace_message;
14331da177e4SLinus Torvalds 	siginfo_t *last_siginfo; /* For ptrace use.  */
14347c3ab738SAndrew Morton 	struct task_io_accounting ioac;
14358f0ab514SJay Lan #if defined(CONFIG_TASK_XACCT)
14361da177e4SLinus Torvalds 	u64 acct_rss_mem1;	/* accumulated rss usage */
14371da177e4SLinus Torvalds 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
143849b5cf34SJonathan Lim 	cputime_t acct_timexpd;	/* stime + utime since last update */
14391da177e4SLinus Torvalds #endif
14401da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
144158568d2aSMiao Xie 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1442cc9a6c87SMel Gorman 	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
1443825a46afSPaul Jackson 	int cpuset_mem_spread_rotor;
14446adef3ebSJack Steiner 	int cpuset_slab_spread_rotor;
14451da177e4SLinus Torvalds #endif
1446ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
1447817929ecSPaul Menage 	/* Control Group info protected by css_set_lock */
14482c392b8cSArnd Bergmann 	struct css_set __rcu *cgroups;
1449817929ecSPaul Menage 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1450817929ecSPaul Menage 	struct list_head cg_list;
1451ddbcc7e8SPaul Menage #endif
145242b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
14530771dfefSIngo Molnar 	struct robust_list_head __user *robust_list;
145434f192c6SIngo Molnar #ifdef CONFIG_COMPAT
145534f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
145634f192c6SIngo Molnar #endif
1457c87e2837SIngo Molnar 	struct list_head pi_state_list;
1458c87e2837SIngo Molnar 	struct futex_pi_state *pi_state_cache;
145942b2dd0aSAlexey Dobriyan #endif
1460cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
14618dc85d54SPeter Zijlstra 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1462cdd6c482SIngo Molnar 	struct mutex perf_event_mutex;
1463cdd6c482SIngo Molnar 	struct list_head perf_event_list;
1464a63eaf34SPaul Mackerras #endif
1465c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
146658568d2aSMiao Xie 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1467c7aceabaSRichard Kennedy 	short il_next;
1468207205a2SEric Dumazet 	short pref_node_fork;
1469c7aceabaSRichard Kennedy #endif
1470cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
1471cbee9f88SPeter Zijlstra 	int numa_scan_seq;
1472cbee9f88SPeter Zijlstra 	unsigned int numa_scan_period;
1473598f0ec0SMel Gorman 	unsigned int numa_scan_period_max;
1474de1c9ce6SRik van Riel 	int numa_preferred_nid;
14756b9a7460SMel Gorman 	unsigned long numa_migrate_retry;
1476cbee9f88SPeter Zijlstra 	u64 node_stamp;			/* migration stamp  */
14777e2703e6SRik van Riel 	u64 last_task_numa_placement;
14787e2703e6SRik van Riel 	u64 last_sum_exec_runtime;
1479cbee9f88SPeter Zijlstra 	struct callback_head numa_work;
1480f809ca9aSMel Gorman 
14818c8a743cSPeter Zijlstra 	struct list_head numa_entry;
14828c8a743cSPeter Zijlstra 	struct numa_group *numa_group;
14838c8a743cSPeter Zijlstra 
1484745d6147SMel Gorman 	/*
1485745d6147SMel Gorman 	 * Exponential decaying average of faults on a per-node basis.
1486745d6147SMel Gorman 	 * Scheduling placement decisions are made based on these counts.
1487745d6147SMel Gorman 	 * The values remain static for the duration of a PTE scan
1488745d6147SMel Gorman 	 */
1489ff1df896SRik van Riel 	unsigned long *numa_faults_memory;
149083e1d2cdSMel Gorman 	unsigned long total_numa_faults;
1491745d6147SMel Gorman 
1492745d6147SMel Gorman 	/*
1493745d6147SMel Gorman 	 * numa_faults_buffer records faults per node during the current
1494ff1df896SRik van Riel 	 * scan window. When the scan completes, the counts in
1495ff1df896SRik van Riel 	 * numa_faults_memory decay and these values are copied.
1496745d6147SMel Gorman 	 */
1497ff1df896SRik van Riel 	unsigned long *numa_faults_buffer_memory;
1498745d6147SMel Gorman 
149904bb2f94SRik van Riel 	/*
150050ec8a40SRik van Riel 	 * Track the nodes the process was running on when a NUMA hinting
150150ec8a40SRik van Riel 	 * fault was incurred.
150250ec8a40SRik van Riel 	 */
150350ec8a40SRik van Riel 	unsigned long *numa_faults_cpu;
150450ec8a40SRik van Riel 	unsigned long *numa_faults_buffer_cpu;
150550ec8a40SRik van Riel 
150650ec8a40SRik van Riel 	/*
150704bb2f94SRik van Riel 	 * numa_faults_locality tracks if faults recorded during the last
150804bb2f94SRik van Riel 	 * scan window were remote/local. The task scan period is adapted
150904bb2f94SRik van Riel 	 * based on the locality of the faults with different weights
151004bb2f94SRik van Riel 	 * depending on whether they were shared or private faults
151104bb2f94SRik van Riel 	 */
151204bb2f94SRik van Riel 	unsigned long numa_faults_locality[2];
151304bb2f94SRik van Riel 
1514b32e86b4SIngo Molnar 	unsigned long numa_pages_migrated;
1515cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
1516cbee9f88SPeter Zijlstra 
1517e56d0903SIngo Molnar 	struct rcu_head rcu;
1518b92ce558SJens Axboe 
1519b92ce558SJens Axboe 	/*
1520b92ce558SJens Axboe 	 * cache last used pipe for splice
1521b92ce558SJens Axboe 	 */
1522b92ce558SJens Axboe 	struct pipe_inode_info *splice_pipe;
15235640f768SEric Dumazet 
15245640f768SEric Dumazet 	struct page_frag task_frag;
15255640f768SEric Dumazet 
1526ca74e92bSShailabh Nagar #ifdef	CONFIG_TASK_DELAY_ACCT
1527ca74e92bSShailabh Nagar 	struct task_delay_info *delays;
1528ca74e92bSShailabh Nagar #endif
1529f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1530f4f154fdSAkinobu Mita 	int make_it_fail;
1531f4f154fdSAkinobu Mita #endif
15329d823e8fSWu Fengguang 	/*
15339d823e8fSWu Fengguang 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
15349d823e8fSWu Fengguang 	 * balance_dirty_pages() for some dirty throttling pause
15359d823e8fSWu Fengguang 	 */
15369d823e8fSWu Fengguang 	int nr_dirtied;
15379d823e8fSWu Fengguang 	int nr_dirtied_pause;
153883712358SWu Fengguang 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
15399d823e8fSWu Fengguang 
15409745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
15419745512cSArjan van de Ven 	int latency_record_count;
15429745512cSArjan van de Ven 	struct latency_record latency_record[LT_SAVECOUNT];
15439745512cSArjan van de Ven #endif
15446976675dSArjan van de Ven 	/*
15456976675dSArjan van de Ven 	 * time slack values; these are used to round up poll() and
15466976675dSArjan van de Ven 	 * select() etc timeout values. These are in nanoseconds.
15476976675dSArjan van de Ven 	 */
15486976675dSArjan van de Ven 	unsigned long timer_slack_ns;
15496976675dSArjan van de Ven 	unsigned long default_timer_slack_ns;
1550f8d570a4SDavid Miller 
1551fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
15523ad2f3fbSDaniel Mack 	/* Index of current stored address in ret_stack */
1553f201ae23SFrederic Weisbecker 	int curr_ret_stack;
1554f201ae23SFrederic Weisbecker 	/* Stack of return addresses for return function tracing */
1555f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack	*ret_stack;
15568aef2d28SSteven Rostedt 	/* time stamp for last schedule */
15578aef2d28SSteven Rostedt 	unsigned long long ftrace_timestamp;
1558f201ae23SFrederic Weisbecker 	/*
1559f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
1560f201ae23SFrederic Weisbecker 	 * because of depth overrun.
1561f201ae23SFrederic Weisbecker 	 */
1562f201ae23SFrederic Weisbecker 	atomic_t trace_overrun;
1563380c4b14SFrederic Weisbecker 	/* Pause for the tracing */
1564380c4b14SFrederic Weisbecker 	atomic_t tracing_graph_pause;
1565f201ae23SFrederic Weisbecker #endif
1566ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1567ea4e2bc4SSteven Rostedt 	/* state flags for use by tracers */
1568ea4e2bc4SSteven Rostedt 	unsigned long trace;
1569b1cff0adSSteven Rostedt 	/* bitmask and counter of trace recursion */
1570261842b7SSteven Rostedt 	unsigned long trace_recursion;
1571261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
1572c255a458SAndrew Morton #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
1573569b846dSKAMEZAWA Hiroyuki 	struct memcg_batch_info {
1574569b846dSKAMEZAWA Hiroyuki 		int do_batch;	/* incremented when batch uncharge started */
1575569b846dSKAMEZAWA Hiroyuki 		struct mem_cgroup *memcg; /* target memcg of uncharge */
15767ffd4ca7SJohannes Weiner 		unsigned long nr_pages;	/* uncharged usage */
15777ffd4ca7SJohannes Weiner 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1578569b846dSKAMEZAWA Hiroyuki 	} memcg_batch;
15790e9d92f2SGlauber Costa 	unsigned int memcg_kmem_skip_account;
1580519e5247SJohannes Weiner 	struct memcg_oom_info {
158149426420SJohannes Weiner 		struct mem_cgroup *memcg;
158249426420SJohannes Weiner 		gfp_t gfp_mask;
158349426420SJohannes Weiner 		int order;
1584519e5247SJohannes Weiner 		unsigned int may_oom:1;
1585519e5247SJohannes Weiner 	} memcg_oom;
1586569b846dSKAMEZAWA Hiroyuki #endif
15870326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
15880326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
15890326f5a9SSrikar Dronamraju #endif
1590cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1591cafe5635SKent Overstreet 	unsigned int	sequential_io;
1592cafe5635SKent Overstreet 	unsigned int	sequential_io_avg;
1593cafe5635SKent Overstreet #endif
15941da177e4SLinus Torvalds };
15951da177e4SLinus Torvalds 
159676e6eee0SRusty Russell /* Future-safe accessor for struct task_struct's cpus_allowed. */
1597a4636818SRusty Russell #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
159876e6eee0SRusty Russell 
15996688cc05SPeter Zijlstra #define TNF_MIGRATED	0x01
16006688cc05SPeter Zijlstra #define TNF_NO_GROUP	0x02
1601dabe1d99SRik van Riel #define TNF_SHARED	0x04
160204bb2f94SRik van Riel #define TNF_FAULT_LOCAL	0x08
16036688cc05SPeter Zijlstra 
1604cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
16056688cc05SPeter Zijlstra extern void task_numa_fault(int last_node, int node, int pages, int flags);
1606e29cf08bSMel Gorman extern pid_t task_numa_group_id(struct task_struct *p);
16071a687c2eSMel Gorman extern void set_numabalancing_state(bool enabled);
160882727018SRik van Riel extern void task_numa_free(struct task_struct *p);
160910f39042SRik van Riel extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
161010f39042SRik van Riel 					int src_nid, int dst_cpu);
1611cbee9f88SPeter Zijlstra #else
1612ac8e895bSMel Gorman static inline void task_numa_fault(int last_node, int node, int pages,
16136688cc05SPeter Zijlstra 				   int flags)
1614cbee9f88SPeter Zijlstra {
1615cbee9f88SPeter Zijlstra }
1616e29cf08bSMel Gorman static inline pid_t task_numa_group_id(struct task_struct *p)
1617e29cf08bSMel Gorman {
1618e29cf08bSMel Gorman 	return 0;
1619e29cf08bSMel Gorman }
16201a687c2eSMel Gorman static inline void set_numabalancing_state(bool enabled)
16211a687c2eSMel Gorman {
16221a687c2eSMel Gorman }
162382727018SRik van Riel static inline void task_numa_free(struct task_struct *p)
162482727018SRik van Riel {
162582727018SRik van Riel }
162610f39042SRik van Riel static inline bool should_numa_migrate_memory(struct task_struct *p,
162710f39042SRik van Riel 				struct page *page, int src_nid, int dst_cpu)
162810f39042SRik van Riel {
162910f39042SRik van Riel 	return true;
163010f39042SRik van Riel }
1631cbee9f88SPeter Zijlstra #endif
1632cbee9f88SPeter Zijlstra 
1633e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
163422c935f4SEric W. Biederman {
163522c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
163622c935f4SEric W. Biederman }
163722c935f4SEric W. Biederman 
1638e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
163922c935f4SEric W. Biederman {
164022c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
164122c935f4SEric W. Biederman }
164222c935f4SEric W. Biederman 
16436dda81f4SOleg Nesterov /*
16446dda81f4SOleg Nesterov  * Without tasklist or rcu lock it is not safe to dereference
16456dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current,
16466dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
16476dda81f4SOleg Nesterov  */
1648e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
164922c935f4SEric W. Biederman {
165022c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
165122c935f4SEric W. Biederman }
165222c935f4SEric W. Biederman 
1653e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
165422c935f4SEric W. Biederman {
165522c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
165622c935f4SEric W. Biederman }
165722c935f4SEric W. Biederman 
16587af57294SPavel Emelyanov struct pid_namespace;
16597af57294SPavel Emelyanov 
16607af57294SPavel Emelyanov /*
16617af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
16627af57294SPavel Emelyanov  * from various namespaces
16637af57294SPavel Emelyanov  *
16647af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
166544c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
166644c4e1b2SEric W. Biederman  *                     current.
16677af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
16687af57294SPavel Emelyanov  *
16697af57294SPavel Emelyanov  * set_task_vxid()   : assigns a virtual id to a task;
16707af57294SPavel Emelyanov  *
16717af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
16727af57294SPavel Emelyanov  */
167352ee2dfdSOleg Nesterov pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
167452ee2dfdSOleg Nesterov 			struct pid_namespace *ns);
16757af57294SPavel Emelyanov 
1676e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
16777af57294SPavel Emelyanov {
16787af57294SPavel Emelyanov 	return tsk->pid;
16797af57294SPavel Emelyanov }
16807af57294SPavel Emelyanov 
168152ee2dfdSOleg Nesterov static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
168252ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
168352ee2dfdSOleg Nesterov {
168452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
168552ee2dfdSOleg Nesterov }
16867af57294SPavel Emelyanov 
16877af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
16887af57294SPavel Emelyanov {
168952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
16907af57294SPavel Emelyanov }
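/*
 * Usage sketch (illustrative): the same task has different numeric pids
 * depending on the observer's pid namespace.
 *
 *	pid_t global = task_pid_nr(tsk);	(id as seen from the init namespace)
 *	pid_t local  = task_pid_vnr(tsk);	(id as seen from current's namespace)
 */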
16917af57294SPavel Emelyanov 
16927af57294SPavel Emelyanov 
1693e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
16947af57294SPavel Emelyanov {
16957af57294SPavel Emelyanov 	return tsk->tgid;
16967af57294SPavel Emelyanov }
16977af57294SPavel Emelyanov 
16982f2a3a46SPavel Emelyanov pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
16997af57294SPavel Emelyanov 
17007af57294SPavel Emelyanov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
17017af57294SPavel Emelyanov {
17027af57294SPavel Emelyanov 	return pid_vnr(task_tgid(tsk));
17037af57294SPavel Emelyanov }
17047af57294SPavel Emelyanov 
17057af57294SPavel Emelyanov 
170652ee2dfdSOleg Nesterov static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
170752ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
17087af57294SPavel Emelyanov {
170952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
17107af57294SPavel Emelyanov }
17117af57294SPavel Emelyanov 
17127af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
17137af57294SPavel Emelyanov {
171452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
17157af57294SPavel Emelyanov }
17167af57294SPavel Emelyanov 
17177af57294SPavel Emelyanov 
171852ee2dfdSOleg Nesterov static inline pid_t task_session_nr_ns(struct task_struct *tsk,
171952ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
17207af57294SPavel Emelyanov {
172152ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
17227af57294SPavel Emelyanov }
17237af57294SPavel Emelyanov 
17247af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
17257af57294SPavel Emelyanov {
172652ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
17277af57294SPavel Emelyanov }
17287af57294SPavel Emelyanov 
17291b0f7ffdSOleg Nesterov /* obsolete, do not use */
17301b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
17311b0f7ffdSOleg Nesterov {
17321b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
17331b0f7ffdSOleg Nesterov }
17347af57294SPavel Emelyanov 
17351da177e4SLinus Torvalds /**
17361da177e4SLinus Torvalds  * pid_alive - check that a task structure is not stale
17371da177e4SLinus Torvalds  * @p: Task structure to be checked.
17381da177e4SLinus Torvalds  *
17391da177e4SLinus Torvalds  * Test if a process is not yet dead (at most zombie state)
17401da177e4SLinus Torvalds  * If pid_alive fails, then pointers within the task structure
17411da177e4SLinus Torvalds  * can be stale and must not be dereferenced.
1742e69f6186SYacine Belkadi  *
1743e69f6186SYacine Belkadi  * Return: 1 if the process is alive. 0 otherwise.
17441da177e4SLinus Torvalds  */
1745e868171aSAlexey Dobriyan static inline int pid_alive(struct task_struct *p)
17461da177e4SLinus Torvalds {
174792476d7fSEric W. Biederman 	return p->pids[PIDTYPE_PID].pid != NULL;
17481da177e4SLinus Torvalds }
17491da177e4SLinus Torvalds 
1750f400e198SSukadev Bhattiprolu /**
1751b460cbc5SSerge E. Hallyn  * is_global_init - check if a task structure is init
17523260259fSHenne  * @tsk: Task structure to be checked.
17533260259fSHenne  *
17543260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1755e69f6186SYacine Belkadi  *
1756e69f6186SYacine Belkadi  * Return: 1 if the task structure is init. 0 otherwise.
1757f400e198SSukadev Bhattiprolu  */
1758e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1759b461cc03SPavel Emelyanov {
1760b461cc03SPavel Emelyanov 	return tsk->pid == 1;
1761b461cc03SPavel Emelyanov }
1762b460cbc5SSerge E. Hallyn 
17639ec52099SCedric Le Goater extern struct pid *cad_pid;
17649ec52099SCedric Le Goater 
17651da177e4SLinus Torvalds extern void free_task(struct task_struct *tsk);
17661da177e4SLinus Torvalds #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1767e56d0903SIngo Molnar 
1768158d9ebdSAndrew Morton extern void __put_task_struct(struct task_struct *t);
1769e56d0903SIngo Molnar 
1770e56d0903SIngo Molnar static inline void put_task_struct(struct task_struct *t)
1771e56d0903SIngo Molnar {
1772e56d0903SIngo Molnar 	if (atomic_dec_and_test(&t->usage))
17738c7904a0SEric W. Biederman 		__put_task_struct(t);
1774e56d0903SIngo Molnar }
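/*
 * Usage sketch (illustrative): keep a task_struct alive across a region
 * where the reference that made it findable may go away.
 *
 *	get_task_struct(p);
 *	... use p after dropping the lock / leaving the RCU section ...
 *	put_task_struct(p);
 */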
17751da177e4SLinus Torvalds 
17766a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
17776a61671bSFrederic Weisbecker extern void task_cputime(struct task_struct *t,
17786a61671bSFrederic Weisbecker 			 cputime_t *utime, cputime_t *stime);
17796a61671bSFrederic Weisbecker extern void task_cputime_scaled(struct task_struct *t,
17806a61671bSFrederic Weisbecker 				cputime_t *utimescaled, cputime_t *stimescaled);
17816a61671bSFrederic Weisbecker extern cputime_t task_gtime(struct task_struct *t);
17826a61671bSFrederic Weisbecker #else
17836fac4829SFrederic Weisbecker static inline void task_cputime(struct task_struct *t,
17846fac4829SFrederic Weisbecker 				cputime_t *utime, cputime_t *stime)
17856fac4829SFrederic Weisbecker {
17866fac4829SFrederic Weisbecker 	if (utime)
17876fac4829SFrederic Weisbecker 		*utime = t->utime;
17886fac4829SFrederic Weisbecker 	if (stime)
17896fac4829SFrederic Weisbecker 		*stime = t->stime;
17906fac4829SFrederic Weisbecker }
17916fac4829SFrederic Weisbecker 
17926fac4829SFrederic Weisbecker static inline void task_cputime_scaled(struct task_struct *t,
17936fac4829SFrederic Weisbecker 				       cputime_t *utimescaled,
17946fac4829SFrederic Weisbecker 				       cputime_t *stimescaled)
17956fac4829SFrederic Weisbecker {
17966fac4829SFrederic Weisbecker 	if (utimescaled)
17976fac4829SFrederic Weisbecker 		*utimescaled = t->utimescaled;
17986fac4829SFrederic Weisbecker 	if (stimescaled)
17996fac4829SFrederic Weisbecker 		*stimescaled = t->stimescaled;
18006fac4829SFrederic Weisbecker }
18016a61671bSFrederic Weisbecker 
18026a61671bSFrederic Weisbecker static inline cputime_t task_gtime(struct task_struct *t)
18036a61671bSFrederic Weisbecker {
18046a61671bSFrederic Weisbecker 	return t->gtime;
18056a61671bSFrederic Weisbecker }
18066a61671bSFrederic Weisbecker #endif
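/*
 * Usage sketch (illustrative): fetch a consistent user/system time pair
 * through the accessor rather than reading t->utime/t->stime directly,
 * so the CONFIG_VIRT_CPU_ACCOUNTING_GEN case is handled too.
 *
 *	cputime_t ut, st;
 *
 *	task_cputime(p, &ut, &st);
 */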
1807e80d0a1aSFrederic Weisbecker extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1808e80d0a1aSFrederic Weisbecker extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
180949048622SBalbir Singh 
18101da177e4SLinus Torvalds /*
18111da177e4SLinus Torvalds  * Per process flags
18121da177e4SLinus Torvalds  */
18131da177e4SLinus Torvalds #define PF_EXITING	0x00000004	/* getting shut down */
1814778e9a9cSAlexey Kuznetsov #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
181594886b84SLaurent Vivier #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
181621aa9af0STejun Heo #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
18171da177e4SLinus Torvalds #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
18184db96cf0SAndi Kleen #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
18191da177e4SLinus Torvalds #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
18201da177e4SLinus Torvalds #define PF_DUMPCORE	0x00000200	/* dumped core */
18211da177e4SLinus Torvalds #define PF_SIGNALED	0x00000400	/* killed by a signal */
18221da177e4SLinus Torvalds #define PF_MEMALLOC	0x00000800	/* Allocating memory */
182372fa5997SVasiliy Kulikov #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
18241da177e4SLinus Torvalds #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1825774a1221STejun Heo #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
18261da177e4SLinus Torvalds #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
18271da177e4SLinus Torvalds #define PF_FROZEN	0x00010000	/* frozen for system suspend */
18281da177e4SLinus Torvalds #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
18291da177e4SLinus Torvalds #define PF_KSWAPD	0x00040000	/* I am kswapd */
183021caf2fcSMing Lei #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
18311da177e4SLinus Torvalds #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1832246bb0b1SOleg Nesterov #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1833b31dc66aSJens Axboe #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1834b31dc66aSJens Axboe #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1835b31dc66aSJens Axboe #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1836b31dc66aSJens Axboe #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
183714a40ffcSTejun Heo #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
18384db96cf0SAndi Kleen #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1839c61afb18SPaul Jackson #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
184061a87122SThomas Gleixner #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
184158a69cb4STejun Heo #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
18422b44c4dbSColin Cross #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
18431da177e4SLinus Torvalds 
18441da177e4SLinus Torvalds /*
18451da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
18461da177e4SLinus Torvalds  * tasks can access tsk->flags in readonly mode for example
18471da177e4SLinus Torvalds  * with tsk_used_math (like during threaded core dumping).
18481da177e4SLinus Torvalds  * There is however an exception to this rule during ptrace
18491da177e4SLinus Torvalds  * or during fork: the ptracer task is allowed to write to the
18501da177e4SLinus Torvalds  * child->flags of its traced child (same goes for fork, the parent
18511da177e4SLinus Torvalds  * can write to the child->flags), because we're guaranteed the
18521da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
18531da177e4SLinus Torvalds  * at the same time the parent does it.
18541da177e4SLinus Torvalds  */
18551da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
18561da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
18571da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
18581da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
18591da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
18601da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
18611da177e4SLinus Torvalds #define conditional_used_math(condition) \
18621da177e4SLinus Torvalds 	conditional_stopped_child_used_math(condition, current)
18631da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
18641da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
18651da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
18661da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
18671da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
18681da177e4SLinus Torvalds 
186921caf2fcSMing Lei /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
187021caf2fcSMing Lei static inline gfp_t memalloc_noio_flags(gfp_t flags)
187121caf2fcSMing Lei {
187221caf2fcSMing Lei 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
187321caf2fcSMing Lei 		flags &= ~__GFP_IO;
187421caf2fcSMing Lei 	return flags;
187521caf2fcSMing Lei }
187621caf2fcSMing Lei 
187721caf2fcSMing Lei static inline unsigned int memalloc_noio_save(void)
187821caf2fcSMing Lei {
187921caf2fcSMing Lei 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
188021caf2fcSMing Lei 	current->flags |= PF_MEMALLOC_NOIO;
188121caf2fcSMing Lei 	return flags;
188221caf2fcSMing Lei }
188321caf2fcSMing Lei 
188421caf2fcSMing Lei static inline void memalloc_noio_restore(unsigned int flags)
188521caf2fcSMing Lei {
188621caf2fcSMing Lei 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
188721caf2fcSMing Lei }
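/*
 * Usage sketch (illustrative): bracket a region whose allocations must not
 * recurse into I/O (e.g. a block driver path that could deadlock on
 * reclaim); allocations inside behave roughly as if __GFP_IO were cleared.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	memalloc_noio_restore(noio_flags);
 */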
188821caf2fcSMing Lei 
1889e5c1902eSTejun Heo /*
1890a8f072c1STejun Heo  * task->jobctl flags
1891e5c1902eSTejun Heo  */
1892a8f072c1STejun Heo #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1893e5c1902eSTejun Heo 
1894a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1895a8f072c1STejun Heo #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1896a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
189773ddff2bSTejun Heo #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1898fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1899a8f072c1STejun Heo #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1900544b2c91STejun Heo #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1901a8f072c1STejun Heo 
1902a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1903a8f072c1STejun Heo #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1904a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
190573ddff2bSTejun Heo #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1906fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1907a8f072c1STejun Heo #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1908544b2c91STejun Heo #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1909a8f072c1STejun Heo 
1910fb1d910cSTejun Heo #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
191173ddff2bSTejun Heo #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
19123759a0d9STejun Heo 
19137dd3db54STejun Heo extern bool task_set_jobctl_pending(struct task_struct *task,
19147dd3db54STejun Heo 				    unsigned int mask);
191573ddff2bSTejun Heo extern void task_clear_jobctl_trapping(struct task_struct *task);
19163759a0d9STejun Heo extern void task_clear_jobctl_pending(struct task_struct *task,
19173759a0d9STejun Heo 				      unsigned int mask);
191839efa3efSTejun Heo 
1919a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1920f41d911fSPaul E. McKenney 
1921f41d911fSPaul E. McKenney #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
19221aa03f11SPaul E. McKenney #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1923f41d911fSPaul E. McKenney 
1924f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1925f41d911fSPaul E. McKenney {
1926f41d911fSPaul E. McKenney 	p->rcu_read_lock_nesting = 0;
1927f41d911fSPaul E. McKenney 	p->rcu_read_unlock_special = 0;
1928a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1929dd5d19baSPaul E. McKenney 	p->rcu_blocked_node = NULL;
193024278d14SPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
193124278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
193224278d14SPaul E. McKenney 	p->rcu_boost_mutex = NULL;
193324278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1934f41d911fSPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_node_entry);
1935f41d911fSPaul E. McKenney }
1936f41d911fSPaul E. McKenney 
1937f41d911fSPaul E. McKenney #else
1938f41d911fSPaul E. McKenney 
1939f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1940f41d911fSPaul E. McKenney {
1941f41d911fSPaul E. McKenney }
1942f41d911fSPaul E. McKenney 
1943f41d911fSPaul E. McKenney #endif
1944f41d911fSPaul E. McKenney 
1945907aed48SMel Gorman static inline void tsk_restore_flags(struct task_struct *task,
1946907aed48SMel Gorman 				unsigned long orig_flags, unsigned long flags)
1947907aed48SMel Gorman {
1948907aed48SMel Gorman 	task->flags &= ~flags;
1949907aed48SMel Gorman 	task->flags |= orig_flags & flags;
1950907aed48SMel Gorman }
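/*
 * Usage sketch (illustrative): temporarily set a per-process flag and then
 * restore exactly the caller's original value for that flag.
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	... allocate from the emergency reserves ...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */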
1951907aed48SMel Gorman 
19521da177e4SLinus Torvalds #ifdef CONFIG_SMP
19531e1b6c51SKOSAKI Motohiro extern void do_set_cpus_allowed(struct task_struct *p,
19541e1b6c51SKOSAKI Motohiro 			       const struct cpumask *new_mask);
19551e1b6c51SKOSAKI Motohiro 
1956cd8ba7cdSMike Travis extern int set_cpus_allowed_ptr(struct task_struct *p,
195796f874e2SRusty Russell 				const struct cpumask *new_mask);
19581da177e4SLinus Torvalds #else
19591e1b6c51SKOSAKI Motohiro static inline void do_set_cpus_allowed(struct task_struct *p,
19601e1b6c51SKOSAKI Motohiro 				      const struct cpumask *new_mask)
19611e1b6c51SKOSAKI Motohiro {
19621e1b6c51SKOSAKI Motohiro }
1963cd8ba7cdSMike Travis static inline int set_cpus_allowed_ptr(struct task_struct *p,
196496f874e2SRusty Russell 				       const struct cpumask *new_mask)
19651da177e4SLinus Torvalds {
196696f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
19671da177e4SLinus Torvalds 		return -EINVAL;
19681da177e4SLinus Torvalds 	return 0;
19691da177e4SLinus Torvalds }
19701da177e4SLinus Torvalds #endif
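/*
 * Usage sketch (illustrative): restrict a task to a single CPU.
 *
 *	set_cpus_allowed_ptr(p, cpumask_of(cpu));
 */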
1971e0ad9556SRusty Russell 
19723451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
19735167e8d5SPeter Zijlstra void calc_load_enter_idle(void);
19745167e8d5SPeter Zijlstra void calc_load_exit_idle(void);
19755167e8d5SPeter Zijlstra #else
19765167e8d5SPeter Zijlstra static inline void calc_load_enter_idle(void) { }
19775167e8d5SPeter Zijlstra static inline void calc_load_exit_idle(void) { }
19783451d024SFrederic Weisbecker #endif /* CONFIG_NO_HZ_COMMON */
19795167e8d5SPeter Zijlstra 
1980e0ad9556SRusty Russell #ifndef CONFIG_CPUMASK_OFFSTACK
1981cd8ba7cdSMike Travis static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1982cd8ba7cdSMike Travis {
1983cd8ba7cdSMike Travis 	return set_cpus_allowed_ptr(p, &new_mask);
1984cd8ba7cdSMike Travis }
1985e0ad9556SRusty Russell #endif
19861da177e4SLinus Torvalds 
1987b342501cSIngo Molnar /*
1988c676329aSPeter Zijlstra  * Do not use outside of architecture code which knows its limitations.
1989c676329aSPeter Zijlstra  *
1990c676329aSPeter Zijlstra  * sched_clock() has no promise of monotonicity or bounded drift between
1991c676329aSPeter Zijlstra  * CPUs; using it directly (which you should not) requires disabling IRQs.
1992c676329aSPeter Zijlstra  *
1993c676329aSPeter Zijlstra  * Please use one of the three interfaces below.
1994b342501cSIngo Molnar  */
19951bbfa6f2SMike Frysinger extern unsigned long long notrace sched_clock(void);
1996c676329aSPeter Zijlstra /*
1997489a71b0SHiroshi Shimamoto  * See the comment in kernel/sched/clock.c
1998c676329aSPeter Zijlstra  */
1999c676329aSPeter Zijlstra extern u64 cpu_clock(int cpu);
2000c676329aSPeter Zijlstra extern u64 local_clock(void);
2001c676329aSPeter Zijlstra extern u64 sched_clock_cpu(int cpu);
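/*
 * Usage sketch (illustrative): local_clock() is the usual choice for cheap
 * duration measurements confined to one CPU; the value is in nanoseconds.
 * do_work() is a stand-in for whatever is being timed:
 *
 *	u64 t0 = local_clock();
 *
 *	do_work();
 *	pr_debug("work took %llu ns\n", local_clock() - t0);
 */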
2002c676329aSPeter Zijlstra 
2003e436d800SIngo Molnar 
2004c1955a3dSPeter Zijlstra extern void sched_clock_init(void);
2005c1955a3dSPeter Zijlstra 
20063e51f33fSPeter Zijlstra #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
20073e51f33fSPeter Zijlstra static inline void sched_clock_tick(void)
20083e51f33fSPeter Zijlstra {
20093e51f33fSPeter Zijlstra }
20103e51f33fSPeter Zijlstra 
20113e51f33fSPeter Zijlstra static inline void sched_clock_idle_sleep_event(void)
20123e51f33fSPeter Zijlstra {
20133e51f33fSPeter Zijlstra }
20143e51f33fSPeter Zijlstra 
20153e51f33fSPeter Zijlstra static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
20163e51f33fSPeter Zijlstra {
20173e51f33fSPeter Zijlstra }
20183e51f33fSPeter Zijlstra #else
2019c676329aSPeter Zijlstra /*
2020c676329aSPeter Zijlstra  * An architecture that selects CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in its
2021c676329aSPeter Zijlstra  * Kconfig may still find during bootup that sched_clock() is reliable
2022c676329aSPeter Zijlstra  * after all; these helpers track and report that state:
2024c676329aSPeter Zijlstra  */
202535af99e6SPeter Zijlstra extern int sched_clock_stable(void);
202635af99e6SPeter Zijlstra extern void set_sched_clock_stable(void);
202735af99e6SPeter Zijlstra extern void clear_sched_clock_stable(void);
2028c676329aSPeter Zijlstra 
20293e51f33fSPeter Zijlstra extern void sched_clock_tick(void);
20303e51f33fSPeter Zijlstra extern void sched_clock_idle_sleep_event(void);
20313e51f33fSPeter Zijlstra extern void sched_clock_idle_wakeup_event(u64 delta_ns);
20323e51f33fSPeter Zijlstra #endif
20333e51f33fSPeter Zijlstra 
2034b52bfee4SVenkatesh Pallipadi #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2035b52bfee4SVenkatesh Pallipadi /*
2036b52bfee4SVenkatesh Pallipadi  * An interface for runtime opt-in to IRQ time accounting based on sched_clock.
2037b52bfee4SVenkatesh Pallipadi  * The opt-in is explicit to avoid a performance penalty on systems with a
2038b52bfee4SVenkatesh Pallipadi  * slow sched_clock().
2039b52bfee4SVenkatesh Pallipadi  */
2040b52bfee4SVenkatesh Pallipadi extern void enable_sched_clock_irqtime(void);
2041b52bfee4SVenkatesh Pallipadi extern void disable_sched_clock_irqtime(void);
2042b52bfee4SVenkatesh Pallipadi #else
2043b52bfee4SVenkatesh Pallipadi static inline void enable_sched_clock_irqtime(void) {}
2044b52bfee4SVenkatesh Pallipadi static inline void disable_sched_clock_irqtime(void) {}
2045b52bfee4SVenkatesh Pallipadi #endif
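/*
 * Illustrative sketch: architecture setup code would only opt in when its
 * clock is known to be cheap; sched_clock_is_fast() below is a hypothetical
 * predicate, not a real kernel helper:
 *
 *	if (sched_clock_is_fast())
 *		enable_sched_clock_irqtime();
 */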
2046b52bfee4SVenkatesh Pallipadi 
204736c8b586SIngo Molnar extern unsigned long long
204841b86e9cSIngo Molnar task_sched_runtime(struct task_struct *task);
20491da177e4SLinus Torvalds 
20501da177e4SLinus Torvalds /* sched_exec is called by processes performing an exec */
20511da177e4SLinus Torvalds #ifdef CONFIG_SMP
20521da177e4SLinus Torvalds extern void sched_exec(void);
20531da177e4SLinus Torvalds #else
20541da177e4SLinus Torvalds #define sched_exec()   {}
20551da177e4SLinus Torvalds #endif
20561da177e4SLinus Torvalds 
20572aa44d05SIngo Molnar extern void sched_clock_idle_sleep_event(void);
20582aa44d05SIngo Molnar extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2059bb29ab26SIngo Molnar 
20601da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
20611da177e4SLinus Torvalds extern void idle_task_exit(void);
20621da177e4SLinus Torvalds #else
20631da177e4SLinus Torvalds static inline void idle_task_exit(void) {}
20641da177e4SLinus Torvalds #endif
20651da177e4SLinus Torvalds 
20663451d024SFrederic Weisbecker #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
20671c20091eSFrederic Weisbecker extern void wake_up_nohz_cpu(int cpu);
206806d8308cSThomas Gleixner #else
20691c20091eSFrederic Weisbecker static inline void wake_up_nohz_cpu(int cpu) { }
207006d8308cSThomas Gleixner #endif
207106d8308cSThomas Gleixner 
2072ce831b38SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
2073ce831b38SFrederic Weisbecker extern bool sched_can_stop_tick(void);
2074265f22a9SFrederic Weisbecker extern u64 scheduler_tick_max_deferment(void);
2075ce831b38SFrederic Weisbecker #else
2076ce831b38SFrederic Weisbecker static inline bool sched_can_stop_tick(void) { return false; }
2077bf0f6f24SIngo Molnar #endif
2078bf0f6f24SIngo Molnar 
20795091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
20805091faa4SMike Galbraith extern void sched_autogroup_create_attach(struct task_struct *p);
20815091faa4SMike Galbraith extern void sched_autogroup_detach(struct task_struct *p);
20825091faa4SMike Galbraith extern void sched_autogroup_fork(struct signal_struct *sig);
20835091faa4SMike Galbraith extern void sched_autogroup_exit(struct signal_struct *sig);
20845091faa4SMike Galbraith #ifdef CONFIG_PROC_FS
20855091faa4SMike Galbraith extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
20862e5b5b3aSHiroshi Shimamoto extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
20875091faa4SMike Galbraith #endif
20885091faa4SMike Galbraith #else
20895091faa4SMike Galbraith static inline void sched_autogroup_create_attach(struct task_struct *p) { }
20905091faa4SMike Galbraith static inline void sched_autogroup_detach(struct task_struct *p) { }
20915091faa4SMike Galbraith static inline void sched_autogroup_fork(struct signal_struct *sig) { }
20925091faa4SMike Galbraith static inline void sched_autogroup_exit(struct signal_struct *sig) { }
20935091faa4SMike Galbraith #endif
20945091faa4SMike Galbraith 
2095d95f4122SMike Galbraith extern bool yield_to(struct task_struct *p, bool preempt);
209636c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
209736c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
2098d0ea0268SDongsheng Yang /**
2099d0ea0268SDongsheng Yang  * task_nice - return the nice value of a given task.
2100d0ea0268SDongsheng Yang  * @p: the task in question.
2101d0ea0268SDongsheng Yang  *
2102d0ea0268SDongsheng Yang  * Return: The nice value [ -20 ... 0 ... 19 ].
2103d0ea0268SDongsheng Yang  */
2104d0ea0268SDongsheng Yang static inline int task_nice(const struct task_struct *p)
2105d0ea0268SDongsheng Yang {
2106d0ea0268SDongsheng Yang 	return PRIO_TO_NICE((p)->static_prio);
2107d0ea0268SDongsheng Yang }
210836c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
210936c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
21101da177e4SLinus Torvalds extern int idle_cpu(int cpu);
2111fe7de49fSKOSAKI Motohiro extern int sched_setscheduler(struct task_struct *, int,
2112fe7de49fSKOSAKI Motohiro 			      const struct sched_param *);
2113961ccdddSRusty Russell extern int sched_setscheduler_nocheck(struct task_struct *, int,
2114fe7de49fSKOSAKI Motohiro 				      const struct sched_param *);
2115d50dde5aSDario Faggioli extern int sched_setattr(struct task_struct *,
2116d50dde5aSDario Faggioli 			 const struct sched_attr *);
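/*
 * Usage sketch (illustrative): switch a task to SCHED_FIFO priority 10;
 * sched_setscheduler() returns 0 on success or a negative errno, and
 * purely in-kernel callers often use sched_setscheduler_nocheck() instead:
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	if (sched_setscheduler(p, SCHED_FIFO, &sp))
 *		pr_warn("could not switch %s to SCHED_FIFO\n", p->comm);
 */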
211736c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
2118c4f30608SPaul E. McKenney /**
2119c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
2120fa757281SRandy Dunlap  * @p: the task in question.
2121e69f6186SYacine Belkadi  *
2122e69f6186SYacine Belkadi  * Return: 1 if @p is an idle task. 0 otherwise.
2123c4f30608SPaul E. McKenney  */
21247061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
2125c4f30608SPaul E. McKenney {
2126c4f30608SPaul E. McKenney 	return p->pid == 0;
2127c4f30608SPaul E. McKenney }
212836c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
212936c8b586SIngo Molnar extern void set_curr_task(int cpu, struct task_struct *p);
21301da177e4SLinus Torvalds 
21311da177e4SLinus Torvalds void yield(void);
21321da177e4SLinus Torvalds 
21331da177e4SLinus Torvalds /*
21341da177e4SLinus Torvalds  * The default (Linux) execution domain.
21351da177e4SLinus Torvalds  */
21361da177e4SLinus Torvalds extern struct exec_domain	default_exec_domain;
21371da177e4SLinus Torvalds 
21381da177e4SLinus Torvalds union thread_union {
21391da177e4SLinus Torvalds 	struct thread_info thread_info;
21401da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
21411da177e4SLinus Torvalds };
21421da177e4SLinus Torvalds 
21431da177e4SLinus Torvalds #ifndef __HAVE_ARCH_KSTACK_END
21441da177e4SLinus Torvalds static inline int kstack_end(void *addr)
21451da177e4SLinus Torvalds {
21461da177e4SLinus Torvalds 	/* Reliable end of stack detection:
21471da177e4SLinus Torvalds 	 * Some APM BIOS versions misalign the stack.
21481da177e4SLinus Torvalds 	 */
21491da177e4SLinus Torvalds 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
21501da177e4SLinus Torvalds }
21511da177e4SLinus Torvalds #endif
21521da177e4SLinus Torvalds 
21531da177e4SLinus Torvalds extern union thread_union init_thread_union;
21541da177e4SLinus Torvalds extern struct task_struct init_task;
21551da177e4SLinus Torvalds 
21561da177e4SLinus Torvalds extern struct   mm_struct init_mm;
21571da177e4SLinus Torvalds 
2158198fe21bSPavel Emelyanov extern struct pid_namespace init_pid_ns;
2159198fe21bSPavel Emelyanov 
2160198fe21bSPavel Emelyanov /*
2161198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
2162198fe21bSPavel Emelyanov  *
2163198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
2164198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
2165228ebcbeSPavel Emelyanov  * find_task_by_vpid():
2166228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
2167198fe21bSPavel Emelyanov  *
2168e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
2169198fe21bSPavel Emelyanov  */
2170198fe21bSPavel Emelyanov 
2171228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
2172228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2173228ebcbeSPavel Emelyanov 		struct pid_namespace *ns);
2174198fe21bSPavel Emelyanov 
21751da177e4SLinus Torvalds /* per-UID process charging. */
21767b44ab97SEric W. Biederman extern struct user_struct * alloc_uid(kuid_t);
21771da177e4SLinus Torvalds static inline struct user_struct *get_uid(struct user_struct *u)
21781da177e4SLinus Torvalds {
21791da177e4SLinus Torvalds 	atomic_inc(&u->__count);
21801da177e4SLinus Torvalds 	return u;
21811da177e4SLinus Torvalds }
21821da177e4SLinus Torvalds extern void free_uid(struct user_struct *);
21831da177e4SLinus Torvalds 
21841da177e4SLinus Torvalds #include <asm/current.h>
21851da177e4SLinus Torvalds 
2186f0af911aSTorben Hohn extern void xtime_update(unsigned long ticks);
21871da177e4SLinus Torvalds 
2188b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2189b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
21903e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
21911da177e4SLinus Torvalds #ifdef CONFIG_SMP
21921da177e4SLinus Torvalds  extern void kick_process(struct task_struct *tsk);
21931da177e4SLinus Torvalds #else
21941da177e4SLinus Torvalds  static inline void kick_process(struct task_struct *tsk) { }
21951da177e4SLinus Torvalds #endif
2196aab03e05SDario Faggioli extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2197ad46c2c4SIngo Molnar extern void sched_dead(struct task_struct *p);
21981da177e4SLinus Torvalds 
21991da177e4SLinus Torvalds extern void proc_caches_init(void);
22001da177e4SLinus Torvalds extern void flush_signals(struct task_struct *);
22013bcac026SDavid Howells extern void __flush_signals(struct task_struct *);
220210ab825bSOleg Nesterov extern void ignore_signals(struct task_struct *);
22031da177e4SLinus Torvalds extern void flush_signal_handlers(struct task_struct *, int force_default);
22041da177e4SLinus Torvalds extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
22051da177e4SLinus Torvalds 
22061da177e4SLinus Torvalds static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
22071da177e4SLinus Torvalds {
22081da177e4SLinus Torvalds 	unsigned long flags;
22091da177e4SLinus Torvalds 	int ret;
22101da177e4SLinus Torvalds 
22111da177e4SLinus Torvalds 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
22121da177e4SLinus Torvalds 	ret = dequeue_signal(tsk, mask, info);
22131da177e4SLinus Torvalds 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
22141da177e4SLinus Torvalds 
22151da177e4SLinus Torvalds 	return ret;
22161da177e4SLinus Torvalds }
22171da177e4SLinus Torvalds 
22181da177e4SLinus Torvalds extern void block_all_signals(int (*notifier)(void *priv), void *priv,
22191da177e4SLinus Torvalds 			      sigset_t *mask);
22201da177e4SLinus Torvalds extern void unblock_all_signals(void);
22211da177e4SLinus Torvalds extern void release_task(struct task_struct * p);
22221da177e4SLinus Torvalds extern int send_sig_info(int, struct siginfo *, struct task_struct *);
22231da177e4SLinus Torvalds extern int force_sigsegv(int, struct task_struct *);
22241da177e4SLinus Torvalds extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2225c4b92fc1SEric W. Biederman extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2226c4b92fc1SEric W. Biederman extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2227d178bc3aSSerge Hallyn extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2228d178bc3aSSerge Hallyn 				const struct cred *, u32);
2229c4b92fc1SEric W. Biederman extern int kill_pgrp(struct pid *pid, int sig, int priv);
2230c4b92fc1SEric W. Biederman extern int kill_pid(struct pid *pid, int sig, int priv);
2231c3de4b38SMatthew Wilcox extern int kill_proc_info(int, struct siginfo *, pid_t);
223286773473SOleg Nesterov extern __must_check bool do_notify_parent(struct task_struct *, int);
2233a7f0765eSOleg Nesterov extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
22341da177e4SLinus Torvalds extern void force_sig(int, struct task_struct *);
22351da177e4SLinus Torvalds extern int send_sig(int, struct task_struct *, int);
223609faef11SOleg Nesterov extern int zap_other_threads(struct task_struct *p);
22371da177e4SLinus Torvalds extern struct sigqueue *sigqueue_alloc(void);
22381da177e4SLinus Torvalds extern void sigqueue_free(struct sigqueue *);
2239ac5c2153SOleg Nesterov extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
22409ac95f2fSOleg Nesterov extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
22411da177e4SLinus Torvalds 
224251a7b448SAl Viro static inline void restore_saved_sigmask(void)
224351a7b448SAl Viro {
224451a7b448SAl Viro 	if (test_and_clear_restore_sigmask())
224577097ae5SAl Viro 		__set_current_blocked(&current->saved_sigmask);
224651a7b448SAl Viro }
224751a7b448SAl Viro 
2248b7f9a11aSAl Viro static inline sigset_t *sigmask_to_save(void)
2249b7f9a11aSAl Viro {
2250b7f9a11aSAl Viro 	sigset_t *res = &current->blocked;
2251b7f9a11aSAl Viro 	if (unlikely(test_restore_sigmask()))
2252b7f9a11aSAl Viro 		res = &current->saved_sigmask;
2253b7f9a11aSAl Viro 	return res;
2254b7f9a11aSAl Viro }
2255b7f9a11aSAl Viro 
22569ec52099SCedric Le Goater static inline int kill_cad_pid(int sig, int priv)
22579ec52099SCedric Le Goater {
22589ec52099SCedric Le Goater 	return kill_pid(cad_pid, sig, priv);
22599ec52099SCedric Le Goater }
22609ec52099SCedric Le Goater 
22611da177e4SLinus Torvalds /* These can be the second arg to send_sig_info/send_group_sig_info.  */
22621da177e4SLinus Torvalds #define SEND_SIG_NOINFO ((struct siginfo *) 0)
22631da177e4SLinus Torvalds #define SEND_SIG_PRIV	((struct siginfo *) 1)
22641da177e4SLinus Torvalds #define SEND_SIG_FORCED	((struct siginfo *) 2)
22651da177e4SLinus Torvalds 
22662a855dd0SSebastian Andrzej Siewior /*
22672a855dd0SSebastian Andrzej Siewior  * True if we are on the alternate signal stack.
22682a855dd0SSebastian Andrzej Siewior  */
22691da177e4SLinus Torvalds static inline int on_sig_stack(unsigned long sp)
22701da177e4SLinus Torvalds {
22712a855dd0SSebastian Andrzej Siewior #ifdef CONFIG_STACK_GROWSUP
22722a855dd0SSebastian Andrzej Siewior 	return sp >= current->sas_ss_sp &&
22732a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp < current->sas_ss_size;
22742a855dd0SSebastian Andrzej Siewior #else
22752a855dd0SSebastian Andrzej Siewior 	return sp > current->sas_ss_sp &&
22762a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp <= current->sas_ss_size;
22772a855dd0SSebastian Andrzej Siewior #endif
22781da177e4SLinus Torvalds }
22791da177e4SLinus Torvalds 
22801da177e4SLinus Torvalds static inline int sas_ss_flags(unsigned long sp)
22811da177e4SLinus Torvalds {
22821da177e4SLinus Torvalds 	return (current->sas_ss_size == 0 ? SS_DISABLE
22831da177e4SLinus Torvalds 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
22841da177e4SLinus Torvalds }
22851da177e4SLinus Torvalds 
22865a1b98d3SAl Viro static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
22875a1b98d3SAl Viro {
22885a1b98d3SAl Viro 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
22895a1b98d3SAl Viro #ifdef CONFIG_STACK_GROWSUP
22905a1b98d3SAl Viro 		return current->sas_ss_sp;
22915a1b98d3SAl Viro #else
22925a1b98d3SAl Viro 		return current->sas_ss_sp + current->sas_ss_size;
22935a1b98d3SAl Viro #endif
22945a1b98d3SAl Viro 	return sp;
22955a1b98d3SAl Viro }
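/*
 * Sketch (illustrative, loosely modeled on arch signal-delivery code): pick
 * the stack for a new signal frame, honouring SA_ONSTACK:
 *
 *	unsigned long sp = sigsp(user_sp, ksig);
 *	frame = (void __user *)((sp - frame_size) & ~15UL);
 *
 * user_sp, frame and frame_size stand in for arch-specific pieces.
 */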
22965a1b98d3SAl Viro 
22971da177e4SLinus Torvalds /*
22981da177e4SLinus Torvalds  * Routines for handling mm_structs
22991da177e4SLinus Torvalds  */
23001da177e4SLinus Torvalds extern struct mm_struct * mm_alloc(void);
23011da177e4SLinus Torvalds 
23021da177e4SLinus Torvalds /* mmdrop drops the mm and the page tables */
2303b3c97528SHarvey Harrison extern void __mmdrop(struct mm_struct *);
23041da177e4SLinus Torvalds static inline void mmdrop(struct mm_struct * mm)
23051da177e4SLinus Torvalds {
23066fb43d7bSIngo Molnar 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
23071da177e4SLinus Torvalds 		__mmdrop(mm);
23081da177e4SLinus Torvalds }
23091da177e4SLinus Torvalds 
23101da177e4SLinus Torvalds /* mmput gets rid of the mappings and all user-space */
23111da177e4SLinus Torvalds extern void mmput(struct mm_struct *);
23121da177e4SLinus Torvalds /* Grab a reference to a task's mm, if it is not already going away */
23131da177e4SLinus Torvalds extern struct mm_struct *get_task_mm(struct task_struct *task);
23148cdb878dSChristopher Yeoh /*
23158cdb878dSChristopher Yeoh  * Grab a reference to a task's mm, if it is not already going away
23168cdb878dSChristopher Yeoh  * and if ptrace_may_access() succeeds for the mode parameter passed
23178cdb878dSChristopher Yeoh  * to it.
23188cdb878dSChristopher Yeoh  */
23198cdb878dSChristopher Yeoh extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
23201da177e4SLinus Torvalds /* Remove the current task's stale references to the old mm_struct */
23211da177e4SLinus Torvalds extern void mm_release(struct task_struct *, struct mm_struct *);
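/*
 * Usage sketch (illustrative): borrow another task's mm and always pair
 * get_task_mm() with mmput():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...	inspect or walk the mm here	...
 *		mmput(mm);
 *	}
 */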
23221da177e4SLinus Torvalds 
23236f2c55b8SAlexey Dobriyan extern int copy_thread(unsigned long, unsigned long, unsigned long,
2324afa86fc4SAl Viro 			struct task_struct *);
23251da177e4SLinus Torvalds extern void flush_thread(void);
23261da177e4SLinus Torvalds extern void exit_thread(void);
23271da177e4SLinus Torvalds 
23281da177e4SLinus Torvalds extern void exit_files(struct task_struct *);
2329a7e5328aSOleg Nesterov extern void __cleanup_sighand(struct sighand_struct *);
2330cbaffba1SOleg Nesterov 
23311da177e4SLinus Torvalds extern void exit_itimers(struct signal_struct *);
2332cbaffba1SOleg Nesterov extern void flush_itimer_signals(void);
23331da177e4SLinus Torvalds 
23349402c95fSJoe Perches extern void do_group_exit(int);
23351da177e4SLinus Torvalds 
23361da177e4SLinus Torvalds extern int allow_signal(int);
23371da177e4SLinus Torvalds extern int disallow_signal(int);
23381da177e4SLinus Torvalds 
2339d7627467SDavid Howells extern int do_execve(const char *,
2340d7627467SDavid Howells 		     const char __user * const __user *,
2341da3d4c5fSAl Viro 		     const char __user * const __user *);
2342e80d6661SAl Viro extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
234336c8b586SIngo Molnar struct task_struct *fork_idle(int);
23442aa3a7f8SAl Viro extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
23451da177e4SLinus Torvalds 
23461da177e4SLinus Torvalds extern void set_task_comm(struct task_struct *tsk, char *from);
234759714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
23481da177e4SLinus Torvalds 
23491da177e4SLinus Torvalds #ifdef CONFIG_SMP
2350317f3941SPeter Zijlstra void scheduler_ipi(void);
235185ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
23521da177e4SLinus Torvalds #else
2353184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
235485ba2d86SRoland McGrath static inline unsigned long wait_task_inactive(struct task_struct *p,
235585ba2d86SRoland McGrath 					       long match_state)
235685ba2d86SRoland McGrath {
235785ba2d86SRoland McGrath 	return 1;
235885ba2d86SRoland McGrath }
23591da177e4SLinus Torvalds #endif
23601da177e4SLinus Torvalds 
236105725f7eSJiri Pirko #define next_task(p) \
236205725f7eSJiri Pirko 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
23631da177e4SLinus Torvalds 
23641da177e4SLinus Torvalds #define for_each_process(p) \
23651da177e4SLinus Torvalds 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
23661da177e4SLinus Torvalds 
23675bb459bbSOleg Nesterov extern bool current_is_single_threaded(void);
2368d84f4f99SDavid Howells 
23691da177e4SLinus Torvalds /*
23701da177e4SLinus Torvalds  * Careful: do_each_thread/while_each_thread is a double loop so
23711da177e4SLinus Torvalds  *          'break' will not work as expected - use goto instead.
23721da177e4SLinus Torvalds  */
23731da177e4SLinus Torvalds #define do_each_thread(g, t) \
23741da177e4SLinus Torvalds 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
23751da177e4SLinus Torvalds 
23761da177e4SLinus Torvalds #define while_each_thread(g, t) \
23771da177e4SLinus Torvalds 	while ((t = next_thread(t)) != g)
23781da177e4SLinus Torvalds 
23790c740d0aSOleg Nesterov #define __for_each_thread(signal, t)	\
23800c740d0aSOleg Nesterov 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
23810c740d0aSOleg Nesterov 
23820c740d0aSOleg Nesterov #define for_each_thread(p, t)		\
23830c740d0aSOleg Nesterov 	__for_each_thread((p)->signal, t)
23840c740d0aSOleg Nesterov 
23850c740d0aSOleg Nesterov /* Careful: this is a double loop, 'break' won't work as expected. */
23860c740d0aSOleg Nesterov #define for_each_process_thread(p, t)	\
23870c740d0aSOleg Nesterov 	for_each_process(p) for_each_thread(p, t)
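/*
 * Usage sketch (illustrative): walk every thread in the system; callers
 * normally hold rcu_read_lock() (or read_lock(&tasklist_lock)) across the
 * iteration:
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		pr_info("%d %s\n", task_pid_nr(t), t->comm);
 *	rcu_read_unlock();
 */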
23880c740d0aSOleg Nesterov 
23897e49827cSOleg Nesterov static inline int get_nr_threads(struct task_struct *tsk)
23907e49827cSOleg Nesterov {
2391b3ac022cSOleg Nesterov 	return tsk->signal->nr_threads;
23927e49827cSOleg Nesterov }
23937e49827cSOleg Nesterov 
2394087806b1SOleg Nesterov static inline bool thread_group_leader(struct task_struct *p)
2395087806b1SOleg Nesterov {
2396087806b1SOleg Nesterov 	return p->exit_signal >= 0;
2397087806b1SOleg Nesterov }
23981da177e4SLinus Torvalds 
23990804ef4bSEric W. Biederman /* Due to the insanities of de_thread it is possible for a process
24000804ef4bSEric W. Biederman  * to have the pid of the thread group leader without actually being
24010804ef4bSEric W. Biederman  * the thread group leader.  For iteration through the pids in proc
24020804ef4bSEric W. Biederman  * all we care about is that we have a task with the appropriate
24030804ef4bSEric W. Biederman  * pid; we don't actually care whether we have the right task.
24040804ef4bSEric W. Biederman  */
2405e1403b8eSOleg Nesterov static inline bool has_group_leader_pid(struct task_struct *p)
24060804ef4bSEric W. Biederman {
2407e1403b8eSOleg Nesterov 	return task_pid(p) == p->signal->leader_pid;
24080804ef4bSEric W. Biederman }
24090804ef4bSEric W. Biederman 
2410bac0abd6SPavel Emelyanov static inline
2411e1403b8eSOleg Nesterov bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2412bac0abd6SPavel Emelyanov {
2413e1403b8eSOleg Nesterov 	return p1->signal == p2->signal;
2414bac0abd6SPavel Emelyanov }
2415bac0abd6SPavel Emelyanov 
241636c8b586SIngo Molnar static inline struct task_struct *next_thread(const struct task_struct *p)
241747e65328SOleg Nesterov {
241805725f7eSJiri Pirko 	return list_entry_rcu(p->thread_group.next,
241936c8b586SIngo Molnar 			      struct task_struct, thread_group);
242047e65328SOleg Nesterov }
242147e65328SOleg Nesterov 
2422e868171aSAlexey Dobriyan static inline int thread_group_empty(struct task_struct *p)
24231da177e4SLinus Torvalds {
242447e65328SOleg Nesterov 	return list_empty(&p->thread_group);
24251da177e4SLinus Torvalds }
24261da177e4SLinus Torvalds 
24271da177e4SLinus Torvalds #define delay_group_leader(p) \
24281da177e4SLinus Torvalds 		(thread_group_leader(p) && !thread_group_empty(p))
24291da177e4SLinus Torvalds 
24301da177e4SLinus Torvalds /*
2431260ea101SEric W. Biederman  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
243222e2c507SJens Axboe  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2433ddbcc7e8SPaul Menage  * pins the final release of task.io_context.  Also protects ->cpuset and
2434d68b46feSOleg Nesterov  * ->cgroup.subsys[]. And ->vfork_done.
24351da177e4SLinus Torvalds  *
24361da177e4SLinus Torvalds  * Nests both inside and outside of read_lock(&tasklist_lock).
24371da177e4SLinus Torvalds  * It must not be nested with write_lock_irq(&tasklist_lock),
24381da177e4SLinus Torvalds  * neither inside nor outside.
24391da177e4SLinus Torvalds  */
24401da177e4SLinus Torvalds static inline void task_lock(struct task_struct *p)
24411da177e4SLinus Torvalds {
24421da177e4SLinus Torvalds 	spin_lock(&p->alloc_lock);
24431da177e4SLinus Torvalds }
24441da177e4SLinus Torvalds 
24451da177e4SLinus Torvalds static inline void task_unlock(struct task_struct *p)
24461da177e4SLinus Torvalds {
24471da177e4SLinus Torvalds 	spin_unlock(&p->alloc_lock);
24481da177e4SLinus Torvalds }
24491da177e4SLinus Torvalds 
2450b8ed374eSNamhyung Kim extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2451f63ee72eSOleg Nesterov 							unsigned long *flags);
2452f63ee72eSOleg Nesterov 
24539388dc30SAnton Vorontsov static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
24549388dc30SAnton Vorontsov 						       unsigned long *flags)
24559388dc30SAnton Vorontsov {
24569388dc30SAnton Vorontsov 	struct sighand_struct *ret;
24579388dc30SAnton Vorontsov 
24589388dc30SAnton Vorontsov 	ret = __lock_task_sighand(tsk, flags);
24599388dc30SAnton Vorontsov 	(void)__cond_lock(&tsk->sighand->siglock, ret);
24609388dc30SAnton Vorontsov 	return ret;
24619388dc30SAnton Vorontsov }
2462b8ed374eSNamhyung Kim 
2463f63ee72eSOleg Nesterov static inline void unlock_task_sighand(struct task_struct *tsk,
2464f63ee72eSOleg Nesterov 						unsigned long *flags)
2465f63ee72eSOleg Nesterov {
2466f63ee72eSOleg Nesterov 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2467f63ee72eSOleg Nesterov }
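/*
 * Usage sketch (illustrative): lock_task_sighand() fails if the task is
 * already being released, so check the return value before touching the
 * signal state:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		...	signal state is stable here	...
 *		unlock_task_sighand(task, &flags);
 *	}
 */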
2468f63ee72eSOleg Nesterov 
24694714d1d3SBen Blum #ifdef CONFIG_CGROUPS
2470257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk)
24714714d1d3SBen Blum {
2472257058aeSTejun Heo 	down_read(&tsk->signal->group_rwsem);
24734714d1d3SBen Blum }
2474257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk)
24754714d1d3SBen Blum {
2476257058aeSTejun Heo 	up_read(&tsk->signal->group_rwsem);
24774714d1d3SBen Blum }
247877e4ef99STejun Heo 
247977e4ef99STejun Heo /**
248077e4ef99STejun Heo  * threadgroup_lock - lock threadgroup
248177e4ef99STejun Heo  * @tsk: member task of the threadgroup to lock
248277e4ef99STejun Heo  *
248377e4ef99STejun Heo  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
248477e4ef99STejun Heo  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2485e56fb287SOleg Nesterov  * change ->group_leader/pid.  This is useful for cases where the threadgroup
2486e56fb287SOleg Nesterov  * needs to stay stable across blockable operations.
248777e4ef99STejun Heo  *
248877e4ef99STejun Heo  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
248977e4ef99STejun Heo  * synchronization.  While held, no new task will be added to threadgroup
249077e4ef99STejun Heo  * and no existing live task will have its PF_EXITING set.
249177e4ef99STejun Heo  *
2492e56fb287SOleg Nesterov  * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2493e56fb287SOleg Nesterov  * sub-thread becomes a new leader.
249477e4ef99STejun Heo  */
2495257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk)
24964714d1d3SBen Blum {
2497257058aeSTejun Heo 	down_write(&tsk->signal->group_rwsem);
24984714d1d3SBen Blum }
249977e4ef99STejun Heo 
250077e4ef99STejun Heo /**
250177e4ef99STejun Heo  * threadgroup_unlock - unlock threadgroup
250277e4ef99STejun Heo  * @tsk: member task of the threadgroup to unlock
250377e4ef99STejun Heo  *
250477e4ef99STejun Heo  * Reverse threadgroup_lock().
250577e4ef99STejun Heo  */
2506257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk)
25074714d1d3SBen Blum {
2508257058aeSTejun Heo 	up_write(&tsk->signal->group_rwsem);
25094714d1d3SBen Blum }
25104714d1d3SBen Blum #else
2511257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2512257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk) {}
2513257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk) {}
2514257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk) {}
25154714d1d3SBen Blum #endif
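/*
 * Usage sketch (illustrative, mirroring how the cgroup attach path uses the
 * lock): hold the threadgroup lock while iterating the group so that no
 * thread can exec or exit underneath you:
 *
 *	threadgroup_lock(leader);
 *	for_each_thread(leader, t)
 *		...	operate on each thread	...
 *	threadgroup_unlock(leader);
 */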
25164714d1d3SBen Blum 
2517f037360fSAl Viro #ifndef __HAVE_THREAD_FUNCTIONS
2518f037360fSAl Viro 
2519f7e4217bSRoman Zippel #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2520f7e4217bSRoman Zippel #define task_stack_page(task)	((task)->stack)
2521a1261f54SAl Viro 
252210ebffdeSAl Viro static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
252310ebffdeSAl Viro {
252410ebffdeSAl Viro 	*task_thread_info(p) = *task_thread_info(org);
252510ebffdeSAl Viro 	task_thread_info(p)->task = p;
252610ebffdeSAl Viro }
252710ebffdeSAl Viro 
252810ebffdeSAl Viro static inline unsigned long *end_of_stack(struct task_struct *p)
252910ebffdeSAl Viro {
2530f7e4217bSRoman Zippel 	return (unsigned long *)(task_thread_info(p) + 1);
253110ebffdeSAl Viro }
253210ebffdeSAl Viro 
2533f037360fSAl Viro #endif
2534f037360fSAl Viro 
25358b05c7e6SFUJITA Tomonori static inline int object_is_on_stack(void *obj)
25368b05c7e6SFUJITA Tomonori {
25378b05c7e6SFUJITA Tomonori 	void *stack = task_stack_page(current);
25388b05c7e6SFUJITA Tomonori 
25398b05c7e6SFUJITA Tomonori 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
25408b05c7e6SFUJITA Tomonori }
25418b05c7e6SFUJITA Tomonori 
25428c9843e5SBenjamin Herrenschmidt extern void thread_info_cache_init(void);
25438c9843e5SBenjamin Herrenschmidt 
25447c9f8861SEric Sandeen #ifdef CONFIG_DEBUG_STACK_USAGE
25457c9f8861SEric Sandeen static inline unsigned long stack_not_used(struct task_struct *p)
25467c9f8861SEric Sandeen {
25477c9f8861SEric Sandeen 	unsigned long *n = end_of_stack(p);
25487c9f8861SEric Sandeen 
25497c9f8861SEric Sandeen 	do { 	/* Skip over canary */
25507c9f8861SEric Sandeen 		n++;
25517c9f8861SEric Sandeen 	} while (!*n);
25527c9f8861SEric Sandeen 
25537c9f8861SEric Sandeen 	return (unsigned long)n - (unsigned long)end_of_stack(p);
25547c9f8861SEric Sandeen }
25557c9f8861SEric Sandeen #endif
25567c9f8861SEric Sandeen 
25571da177e4SLinus Torvalds /* Set thread flags in another task's structures
25581da177e4SLinus Torvalds  * - see asm/thread_info.h for the available TIF_xxxx flags
25591da177e4SLinus Torvalds  */
25601da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
25611da177e4SLinus Torvalds {
2562a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
25631da177e4SLinus Torvalds }
25641da177e4SLinus Torvalds 
25651da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25661da177e4SLinus Torvalds {
2567a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
25681da177e4SLinus Torvalds }
25691da177e4SLinus Torvalds 
25701da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
25711da177e4SLinus Torvalds {
2572a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
25731da177e4SLinus Torvalds }
25741da177e4SLinus Torvalds 
25751da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25761da177e4SLinus Torvalds {
2577a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
25781da177e4SLinus Torvalds }
25791da177e4SLinus Torvalds 
25801da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
25811da177e4SLinus Torvalds {
2582a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
25831da177e4SLinus Torvalds }
25841da177e4SLinus Torvalds 
25851da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
25861da177e4SLinus Torvalds {
25871da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25881da177e4SLinus Torvalds }
25891da177e4SLinus Torvalds 
25901da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
25911da177e4SLinus Torvalds {
25921da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25931da177e4SLinus Torvalds }
25941da177e4SLinus Torvalds 
25958ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
25968ae121acSGregory Haskins {
25978ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
25988ae121acSGregory Haskins }
25998ae121acSGregory Haskins 
2600690cc3ffSEric W. Biederman static inline int restart_syscall(void)
2601690cc3ffSEric W. Biederman {
2602690cc3ffSEric W. Biederman 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2603690cc3ffSEric W. Biederman 	return -ERESTARTNOINTR;
2604690cc3ffSEric W. Biederman }
2605690cc3ffSEric W. Biederman 
26061da177e4SLinus Torvalds static inline int signal_pending(struct task_struct *p)
26071da177e4SLinus Torvalds {
26081da177e4SLinus Torvalds 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
26091da177e4SLinus Torvalds }
26101da177e4SLinus Torvalds 
2611d9588725SRoland McGrath static inline int __fatal_signal_pending(struct task_struct *p)
2612d9588725SRoland McGrath {
2613d9588725SRoland McGrath 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2614d9588725SRoland McGrath }
2615f776d12dSMatthew Wilcox 
2616f776d12dSMatthew Wilcox static inline int fatal_signal_pending(struct task_struct *p)
2617f776d12dSMatthew Wilcox {
2618f776d12dSMatthew Wilcox 	return signal_pending(p) && __fatal_signal_pending(p);
2619f776d12dSMatthew Wilcox }
2620f776d12dSMatthew Wilcox 
262116882c1eSOleg Nesterov static inline int signal_pending_state(long state, struct task_struct *p)
262216882c1eSOleg Nesterov {
262316882c1eSOleg Nesterov 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
262416882c1eSOleg Nesterov 		return 0;
262516882c1eSOleg Nesterov 	if (!signal_pending(p))
262616882c1eSOleg Nesterov 		return 0;
262716882c1eSOleg Nesterov 
262816882c1eSOleg Nesterov 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
262916882c1eSOleg Nesterov }
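/*
 * Usage sketch (illustrative): the classic interruptible wait loop that
 * these helpers support; 'condition' is whatever the caller is waiting for:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!condition) {
 *		if (signal_pending(current)) {
 *			ret = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */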
263016882c1eSOleg Nesterov 
26311da177e4SLinus Torvalds /*
26321da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
26331da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
26341da177e4SLinus Torvalds  * value indicates whether a reschedule was done in fact.
26351da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling,
26361da177e4SLinus Torvalds  * cond_resched_softirq() will enable bhs before scheduling.
26371da177e4SLinus Torvalds  */
2638c3921ab7SLinus Torvalds extern int _cond_resched(void);
26396f80bd98SFrederic Weisbecker 
2640613afbf8SFrederic Weisbecker #define cond_resched() ({			\
2641613afbf8SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, 0);	\
2642613afbf8SFrederic Weisbecker 	_cond_resched();			\
2643613afbf8SFrederic Weisbecker })
26446f80bd98SFrederic Weisbecker 
2645613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
2646613afbf8SFrederic Weisbecker 
2647bdd4e85dSFrederic Weisbecker #ifdef CONFIG_PREEMPT_COUNT
2648716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
264902b67cc3SHerbert Xu #else
2650716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	0
265102b67cc3SHerbert Xu #endif
2652716a4234SFrederic Weisbecker 
2653613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
2654716a4234SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2655613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
2656613afbf8SFrederic Weisbecker })
2657613afbf8SFrederic Weisbecker 
2658613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
2659613afbf8SFrederic Weisbecker 
2660613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
266175e1056fSVenkatesh Pallipadi 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2662613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
2663613afbf8SFrederic Weisbecker })
26641da177e4SLinus Torvalds 
2665f6f3c437SSimon Horman static inline void cond_resched_rcu(void)
2666f6f3c437SSimon Horman {
2667f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2668f6f3c437SSimon Horman 	rcu_read_unlock();
2669f6f3c437SSimon Horman 	cond_resched();
2670f6f3c437SSimon Horman 	rcu_read_lock();
2671f6f3c437SSimon Horman #endif
2672f6f3c437SSimon Horman }
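/*
 * Usage sketch (illustrative): sprinkle cond_resched() into long-running
 * loops so other tasks get to run even on non-preemptible kernels;
 * process_item() and nr_items are placeholders:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */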
2673f6f3c437SSimon Horman 
26741da177e4SLinus Torvalds /*
26751da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
267695c354feSNick Piggin  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
267795c354feSNick Piggin  * but it reflects a general need for low latency.)
26781da177e4SLinus Torvalds  */
267995c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
26801da177e4SLinus Torvalds {
268195c354feSNick Piggin #ifdef CONFIG_PREEMPT
268295c354feSNick Piggin 	return spin_is_contended(lock);
268395c354feSNick Piggin #else
26841da177e4SLinus Torvalds 	return 0;
268595c354feSNick Piggin #endif
26861da177e4SLinus Torvalds }
26871da177e4SLinus Torvalds 
26887bb44adeSRoland McGrath /*
2689ee761f62SThomas Gleixner  * Idle thread specific functions to determine the need_resched
2690ee761f62SThomas Gleixner  * Idle-thread-specific functions to determine the need_resched
2691ee761f62SThomas Gleixner  * polling state.  We have two versions, one based on TS_POLLING in
2692ee761f62SThomas Gleixner  * thread_info.status and one based on TIF_POLLING_NRFLAG in
2693ee761f62SThomas Gleixner  * thread_info.flags.
2694ee761f62SThomas Gleixner #ifdef TS_POLLING
2695ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p)
2696ee761f62SThomas Gleixner {
2697ee761f62SThomas Gleixner 	return task_thread_info(p)->status & TS_POLLING;
2698ee761f62SThomas Gleixner }
2699ea811747SPeter Zijlstra static inline void __current_set_polling(void)
27003a98f871SThomas Gleixner {
27013a98f871SThomas Gleixner 	current_thread_info()->status |= TS_POLLING;
27023a98f871SThomas Gleixner }
27033a98f871SThomas Gleixner 
2704ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2705ea811747SPeter Zijlstra {
2706ea811747SPeter Zijlstra 	__current_set_polling();
2707ea811747SPeter Zijlstra 
2708ea811747SPeter Zijlstra 	/*
2709ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2710ea811747SPeter Zijlstra 	 * paired with resched_task()
2711ea811747SPeter Zijlstra 	 */
2712ea811747SPeter Zijlstra 	smp_mb();
2713ea811747SPeter Zijlstra 
2714ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2715ea811747SPeter Zijlstra }
2716ea811747SPeter Zijlstra 
2717ea811747SPeter Zijlstra static inline void __current_clr_polling(void)
27183a98f871SThomas Gleixner {
27193a98f871SThomas Gleixner 	current_thread_info()->status &= ~TS_POLLING;
2720ea811747SPeter Zijlstra }
2721ea811747SPeter Zijlstra 
2722ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2723ea811747SPeter Zijlstra {
2724ea811747SPeter Zijlstra 	__current_clr_polling();
2725ea811747SPeter Zijlstra 
2726ea811747SPeter Zijlstra 	/*
2727ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2728ea811747SPeter Zijlstra 	 * paired with resched_task()
2729ea811747SPeter Zijlstra 	 */
2730ea811747SPeter Zijlstra 	smp_mb();
2731ea811747SPeter Zijlstra 
2732ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
27333a98f871SThomas Gleixner }
2734ee761f62SThomas Gleixner #elif defined(TIF_POLLING_NRFLAG)
2735ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p)
2736ee761f62SThomas Gleixner {
2737ee761f62SThomas Gleixner 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2738ee761f62SThomas Gleixner }
2739ea811747SPeter Zijlstra 
2740ea811747SPeter Zijlstra static inline void __current_set_polling(void)
27413a98f871SThomas Gleixner {
27423a98f871SThomas Gleixner 	set_thread_flag(TIF_POLLING_NRFLAG);
27433a98f871SThomas Gleixner }
27443a98f871SThomas Gleixner 
2745ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2746ea811747SPeter Zijlstra {
2747ea811747SPeter Zijlstra 	__current_set_polling();
2748ea811747SPeter Zijlstra 
2749ea811747SPeter Zijlstra 	/*
2750ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2751ea811747SPeter Zijlstra 	 * paired with resched_task()
2752ea811747SPeter Zijlstra 	 *
2753ea811747SPeter Zijlstra 	 * XXX: assumes set/clear bit are identical barrier wise.
2754ea811747SPeter Zijlstra 	 */
2755ea811747SPeter Zijlstra 	smp_mb__after_clear_bit();
2756ea811747SPeter Zijlstra 
2757ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2758ea811747SPeter Zijlstra }
2759ea811747SPeter Zijlstra 
2760ea811747SPeter Zijlstra static inline void __current_clr_polling(void)
27613a98f871SThomas Gleixner {
27623a98f871SThomas Gleixner 	clear_thread_flag(TIF_POLLING_NRFLAG);
27633a98f871SThomas Gleixner }
2764ea811747SPeter Zijlstra 
2765ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2766ea811747SPeter Zijlstra {
2767ea811747SPeter Zijlstra 	__current_clr_polling();
2768ea811747SPeter Zijlstra 
2769ea811747SPeter Zijlstra 	/*
2770ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2771ea811747SPeter Zijlstra 	 * paired with resched_task()
2772ea811747SPeter Zijlstra 	 */
2773ea811747SPeter Zijlstra 	smp_mb__after_clear_bit();
2774ea811747SPeter Zijlstra 
2775ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2776ea811747SPeter Zijlstra }
2777ea811747SPeter Zijlstra 
2778ee761f62SThomas Gleixner #else
2779ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2780ea811747SPeter Zijlstra static inline void __current_set_polling(void) { }
2781ea811747SPeter Zijlstra static inline void __current_clr_polling(void) { }
2782ea811747SPeter Zijlstra 
2783ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2784ea811747SPeter Zijlstra {
2785ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2786ea811747SPeter Zijlstra }
2787ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2788ea811747SPeter Zijlstra {
2789ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2790ea811747SPeter Zijlstra }
2791ee761f62SThomas Gleixner #endif
2792ee761f62SThomas Gleixner 
27938cb75e0cSPeter Zijlstra static inline void current_clr_polling(void)
27948cb75e0cSPeter Zijlstra {
27958cb75e0cSPeter Zijlstra 	__current_clr_polling();
27968cb75e0cSPeter Zijlstra 
27978cb75e0cSPeter Zijlstra 	/*
27988cb75e0cSPeter Zijlstra 	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
27998cb75e0cSPeter Zijlstra 	 * Once the bit is cleared, we'll get IPIs with every new
28008cb75e0cSPeter Zijlstra 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
28018cb75e0cSPeter Zijlstra 	 * fold.
28028cb75e0cSPeter Zijlstra 	 */
28038cb75e0cSPeter Zijlstra 	smp_mb(); /* paired with resched_task() */
28048cb75e0cSPeter Zijlstra 
28058cb75e0cSPeter Zijlstra 	preempt_fold_need_resched();
28068cb75e0cSPeter Zijlstra }
28078cb75e0cSPeter Zijlstra 
280875f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
280975f93fedSPeter Zijlstra {
281075f93fedSPeter Zijlstra 	return unlikely(tif_need_resched());
281175f93fedSPeter Zijlstra }
281275f93fedSPeter Zijlstra 
2813ee761f62SThomas Gleixner /*
2814f06febc9SFrank Mayhar  * Thread group CPU time accounting.
2815f06febc9SFrank Mayhar  */
28164cd4c1b4SPeter Zijlstra void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
28174da94d49SPeter Zijlstra void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2818f06febc9SFrank Mayhar 
2819f06febc9SFrank Mayhar static inline void thread_group_cputime_init(struct signal_struct *sig)
2820f06febc9SFrank Mayhar {
2821ee30a7b2SThomas Gleixner 	raw_spin_lock_init(&sig->cputimer.lock);
2822f06febc9SFrank Mayhar }
2823f06febc9SFrank Mayhar 
2824f06febc9SFrank Mayhar /*
28257bb44adeSRoland McGrath  * Reevaluate whether the task has signals pending delivery.
28267bb44adeSRoland McGrath  * Wake the task if so.
28277bb44adeSRoland McGrath  * This is required every time the blocked sigset_t changes.
28287bb44adeSRoland McGrath  * Callers must hold sighand->siglock.
28297bb44adeSRoland McGrath  */
28307bb44adeSRoland McGrath extern void recalc_sigpending_and_wake(struct task_struct *t);
28311da177e4SLinus Torvalds extern void recalc_sigpending(void);
28321da177e4SLinus Torvalds 
2833910ffdb1SOleg Nesterov extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2834910ffdb1SOleg Nesterov 
2835910ffdb1SOleg Nesterov static inline void signal_wake_up(struct task_struct *t, bool resume)
2836910ffdb1SOleg Nesterov {
2837910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2838910ffdb1SOleg Nesterov }
2839910ffdb1SOleg Nesterov static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2840910ffdb1SOleg Nesterov {
2841910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2842910ffdb1SOleg Nesterov }
28431da177e4SLinus Torvalds 
28441da177e4SLinus Torvalds /*
28451da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
28461da177e4SLinus Torvalds  */
28471da177e4SLinus Torvalds #ifdef CONFIG_SMP
28481da177e4SLinus Torvalds 
28491da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
28501da177e4SLinus Torvalds {
2851a1261f54SAl Viro 	return task_thread_info(p)->cpu;
28521da177e4SLinus Torvalds }
28531da177e4SLinus Torvalds 
2854b32e86b4SIngo Molnar static inline int task_node(const struct task_struct *p)
2855b32e86b4SIngo Molnar {
2856b32e86b4SIngo Molnar 	return cpu_to_node(task_cpu(p));
2857b32e86b4SIngo Molnar }
2858b32e86b4SIngo Molnar 
2859c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
28601da177e4SLinus Torvalds 
28611da177e4SLinus Torvalds #else
28621da177e4SLinus Torvalds 
28631da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
28641da177e4SLinus Torvalds {
28651da177e4SLinus Torvalds 	return 0;
28661da177e4SLinus Torvalds }
28671da177e4SLinus Torvalds 
28681da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
28691da177e4SLinus Torvalds {
28701da177e4SLinus Torvalds }
28711da177e4SLinus Torvalds 
28721da177e4SLinus Torvalds #endif /* CONFIG_SMP */
28731da177e4SLinus Torvalds 
287496f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
287596f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
28765c45bf27SSiddha, Suresh B 
28777c941438SDhaval Giani #ifdef CONFIG_CGROUP_SCHED
287807e06b01SYong Zhang extern struct task_group root_task_group;
28798323f26cSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */
28809b5b7751SSrivatsa Vaddagiri 
288154e99124SDhaval Giani extern int task_can_switch_user(struct user_struct *up,
288254e99124SDhaval Giani 					struct task_struct *tsk);
288354e99124SDhaval Giani 
28844b98d11bSAlexey Dobriyan #ifdef CONFIG_TASK_XACCT
28854b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
28864b98d11bSAlexey Dobriyan {
2887940389b8SAndrea Righi 	tsk->ioac.rchar += amt;
28884b98d11bSAlexey Dobriyan }
28894b98d11bSAlexey Dobriyan 
28904b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
28914b98d11bSAlexey Dobriyan {
2892940389b8SAndrea Righi 	tsk->ioac.wchar += amt;
28934b98d11bSAlexey Dobriyan }
28944b98d11bSAlexey Dobriyan 
28954b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
28964b98d11bSAlexey Dobriyan {
2897940389b8SAndrea Righi 	tsk->ioac.syscr++;
28984b98d11bSAlexey Dobriyan }
28994b98d11bSAlexey Dobriyan 
29004b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
29014b98d11bSAlexey Dobriyan {
2902940389b8SAndrea Righi 	tsk->ioac.syscw++;
29034b98d11bSAlexey Dobriyan }
29044b98d11bSAlexey Dobriyan #else
29054b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
29064b98d11bSAlexey Dobriyan {
29074b98d11bSAlexey Dobriyan }
29084b98d11bSAlexey Dobriyan 
29094b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
29104b98d11bSAlexey Dobriyan {
29114b98d11bSAlexey Dobriyan }
29124b98d11bSAlexey Dobriyan 
29134b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
29144b98d11bSAlexey Dobriyan {
29154b98d11bSAlexey Dobriyan }
29164b98d11bSAlexey Dobriyan 
29174b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
29184b98d11bSAlexey Dobriyan {
29194b98d11bSAlexey Dobriyan }
29204b98d11bSAlexey Dobriyan #endif
29214b98d11bSAlexey Dobriyan 
292282455257SDave Hansen #ifndef TASK_SIZE_OF
292382455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
292482455257SDave Hansen #endif
292582455257SDave Hansen 
2926cf475ad2SBalbir Singh #ifdef CONFIG_MM_OWNER
2927cf475ad2SBalbir Singh extern void mm_update_next_owner(struct mm_struct *mm);
2928cf475ad2SBalbir Singh extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2929cf475ad2SBalbir Singh #else
2930cf475ad2SBalbir Singh static inline void mm_update_next_owner(struct mm_struct *mm)
2931cf475ad2SBalbir Singh {
2932cf475ad2SBalbir Singh }
2933cf475ad2SBalbir Singh 
2934cf475ad2SBalbir Singh static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2935cf475ad2SBalbir Singh {
2936cf475ad2SBalbir Singh }
2937cf475ad2SBalbir Singh #endif /* CONFIG_MM_OWNER */
2938cf475ad2SBalbir Singh 
29393e10e716SJiri Slaby static inline unsigned long task_rlimit(const struct task_struct *tsk,
29403e10e716SJiri Slaby 		unsigned int limit)
29413e10e716SJiri Slaby {
29423e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
29433e10e716SJiri Slaby }
29443e10e716SJiri Slaby 
29453e10e716SJiri Slaby static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
29463e10e716SJiri Slaby 		unsigned int limit)
29473e10e716SJiri Slaby {
29483e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
29493e10e716SJiri Slaby }
29503e10e716SJiri Slaby 
29513e10e716SJiri Slaby static inline unsigned long rlimit(unsigned int limit)
29523e10e716SJiri Slaby {
29533e10e716SJiri Slaby 	return task_rlimit(current, limit);
29543e10e716SJiri Slaby }
29553e10e716SJiri Slaby 
29563e10e716SJiri Slaby static inline unsigned long rlimit_max(unsigned int limit)
29573e10e716SJiri Slaby {
29583e10e716SJiri Slaby 	return task_rlimit_max(current, limit);
29593e10e716SJiri Slaby }
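/*
 * Usage sketch (illustrative): compare a request against the caller's soft
 * RLIMIT_MEMLOCK before pinning memory; locked_bytes is a placeholder:
 *
 *	if (locked_bytes > rlimit(RLIMIT_MEMLOCK))
 *		return -ENOMEM;
 */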
29603e10e716SJiri Slaby 
29611da177e4SLinus Torvalds #endif
2962