xref: /linux/include/linux/sched.h (revision f809ca9a554dda49fb264c79e31c722e0b063ff8)
11da177e4SLinus Torvalds #ifndef _LINUX_SCHED_H
21da177e4SLinus Torvalds #define _LINUX_SCHED_H
31da177e4SLinus Torvalds 
4607ca46eSDavid Howells #include <uapi/linux/sched.h>
5b7b3c76aSDavid Woodhouse 
6b7b3c76aSDavid Woodhouse 
7b7b3c76aSDavid Woodhouse struct sched_param {
8b7b3c76aSDavid Woodhouse 	int sched_priority;
9b7b3c76aSDavid Woodhouse };
10b7b3c76aSDavid Woodhouse 
111da177e4SLinus Torvalds #include <asm/param.h>	/* for HZ */
121da177e4SLinus Torvalds 
131da177e4SLinus Torvalds #include <linux/capability.h>
141da177e4SLinus Torvalds #include <linux/threads.h>
151da177e4SLinus Torvalds #include <linux/kernel.h>
161da177e4SLinus Torvalds #include <linux/types.h>
171da177e4SLinus Torvalds #include <linux/timex.h>
181da177e4SLinus Torvalds #include <linux/jiffies.h>
191da177e4SLinus Torvalds #include <linux/rbtree.h>
201da177e4SLinus Torvalds #include <linux/thread_info.h>
211da177e4SLinus Torvalds #include <linux/cpumask.h>
221da177e4SLinus Torvalds #include <linux/errno.h>
231da177e4SLinus Torvalds #include <linux/nodemask.h>
24c92ff1bdSMartin Schwidefsky #include <linux/mm_types.h>
25f27dde8dSPeter Zijlstra #include <linux/preempt.h>
261da177e4SLinus Torvalds 
271da177e4SLinus Torvalds #include <asm/page.h>
281da177e4SLinus Torvalds #include <asm/ptrace.h>
291da177e4SLinus Torvalds #include <asm/cputime.h>
301da177e4SLinus Torvalds 
311da177e4SLinus Torvalds #include <linux/smp.h>
321da177e4SLinus Torvalds #include <linux/sem.h>
331da177e4SLinus Torvalds #include <linux/signal.h>
341da177e4SLinus Torvalds #include <linux/compiler.h>
351da177e4SLinus Torvalds #include <linux/completion.h>
361da177e4SLinus Torvalds #include <linux/pid.h>
371da177e4SLinus Torvalds #include <linux/percpu.h>
381da177e4SLinus Torvalds #include <linux/topology.h>
393e26c149SPeter Zijlstra #include <linux/proportions.h>
401da177e4SLinus Torvalds #include <linux/seccomp.h>
41e56d0903SIngo Molnar #include <linux/rcupdate.h>
4205725f7eSJiri Pirko #include <linux/rculist.h>
4323f78d4aSIngo Molnar #include <linux/rtmutex.h>
441da177e4SLinus Torvalds 
45a3b6714eSDavid Woodhouse #include <linux/time.h>
46a3b6714eSDavid Woodhouse #include <linux/param.h>
47a3b6714eSDavid Woodhouse #include <linux/resource.h>
48a3b6714eSDavid Woodhouse #include <linux/timer.h>
49a3b6714eSDavid Woodhouse #include <linux/hrtimer.h>
507c3ab738SAndrew Morton #include <linux/task_io_accounting.h>
519745512cSArjan van de Ven #include <linux/latencytop.h>
529e2b2dc4SDavid Howells #include <linux/cred.h>
53fa14ff4aSPeter Zijlstra #include <linux/llist.h>
547b44ab97SEric W. Biederman #include <linux/uidgid.h>
5521caf2fcSMing Lei #include <linux/gfp.h>
56a3b6714eSDavid Woodhouse 
57a3b6714eSDavid Woodhouse #include <asm/processor.h>
5836d57ac4SH. J. Lu 
591da177e4SLinus Torvalds struct exec_domain;
60c87e2837SIngo Molnar struct futex_pi_state;
61286100a6SAlexey Dobriyan struct robust_list_head;
62bddd87c7SAkinobu Mita struct bio_list;
635ad4e53bSAl Viro struct fs_struct;
64cdd6c482SIngo Molnar struct perf_event_context;
6573c10101SJens Axboe struct blk_plug;
661da177e4SLinus Torvalds 
671da177e4SLinus Torvalds /*
681da177e4SLinus Torvalds  * List of flags we want to share for kernel threads,
691da177e4SLinus Torvalds  * if only because they are not used by them anyway.
701da177e4SLinus Torvalds  */
711da177e4SLinus Torvalds #define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
721da177e4SLinus Torvalds 
731da177e4SLinus Torvalds /*
741da177e4SLinus Torvalds  * These are the constant used to fake the fixed-point load-average
751da177e4SLinus Torvalds  * counting. Some notes:
761da177e4SLinus Torvalds  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
771da177e4SLinus Torvalds  *    a load-average precision of 10 bits integer + 11 bits fractional
781da177e4SLinus Torvalds  *  - if you want to count load-averages more often, you need more
791da177e4SLinus Torvalds  *    precision, or rounding will get you. With 2-second counting freq,
801da177e4SLinus Torvalds  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
811da177e4SLinus Torvalds  *    11 bit fractions.
821da177e4SLinus Torvalds  */
831da177e4SLinus Torvalds extern unsigned long avenrun[];		/* Load averages */
842d02494fSThomas Gleixner extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
851da177e4SLinus Torvalds 
861da177e4SLinus Torvalds #define FSHIFT		11		/* nr of bits of precision */
871da177e4SLinus Torvalds #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
880c2043abSLinus Torvalds #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
891da177e4SLinus Torvalds #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
901da177e4SLinus Torvalds #define EXP_5		2014		/* 1/exp(5sec/5min) */
911da177e4SLinus Torvalds #define EXP_15		2037		/* 1/exp(5sec/15min) */
921da177e4SLinus Torvalds 
931da177e4SLinus Torvalds #define CALC_LOAD(load,exp,n) \
941da177e4SLinus Torvalds 	load *= exp; \
951da177e4SLinus Torvalds 	load += n*(FIXED_1-exp); \
961da177e4SLinus Torvalds 	load >>= FSHIFT;
971da177e4SLinus Torvalds 
981da177e4SLinus Torvalds extern unsigned long total_forks;
991da177e4SLinus Torvalds extern int nr_threads;
1001da177e4SLinus Torvalds DECLARE_PER_CPU(unsigned long, process_counts);
1011da177e4SLinus Torvalds extern int nr_processes(void);
1021da177e4SLinus Torvalds extern unsigned long nr_running(void);
1031da177e4SLinus Torvalds extern unsigned long nr_iowait(void);
1048c215bd3SPeter Zijlstra extern unsigned long nr_iowait_cpu(int cpu);
10569d25870SArjan van de Ven extern unsigned long this_cpu_load(void);
10669d25870SArjan van de Ven 
10769d25870SArjan van de Ven 
1080f004f5aSPeter Zijlstra extern void calc_global_load(unsigned long ticks);
1095aaa0b7aSPeter Zijlstra extern void update_cpu_load_nohz(void);
1101da177e4SLinus Torvalds 
1117e49fcceSSteven Rostedt extern unsigned long get_parent_ip(unsigned long addr);
1127e49fcceSSteven Rostedt 
113b637a328SPaul E. McKenney extern void dump_cpu_task(int cpu);
114b637a328SPaul E. McKenney 
11543ae34cbSIngo Molnar struct seq_file;
11643ae34cbSIngo Molnar struct cfs_rq;
1174cf86d77SIngo Molnar struct task_group;
11843ae34cbSIngo Molnar #ifdef CONFIG_SCHED_DEBUG
11943ae34cbSIngo Molnar extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
12043ae34cbSIngo Molnar extern void proc_sched_set_task(struct task_struct *p);
12143ae34cbSIngo Molnar extern void
1225cef9ecaSIngo Molnar print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
12343ae34cbSIngo Molnar #endif
1241da177e4SLinus Torvalds 
1254a8342d2SLinus Torvalds /*
1264a8342d2SLinus Torvalds  * Task state bitmask. NOTE! These bits are also
1274a8342d2SLinus Torvalds  * encoded in fs/proc/array.c: get_task_state().
1284a8342d2SLinus Torvalds  *
1294a8342d2SLinus Torvalds  * We have two separate sets of flags: task->state
1304a8342d2SLinus Torvalds  * is about runnability, while task->exit_state are
1314a8342d2SLinus Torvalds  * about the task exiting. Confusing, but this way
1324a8342d2SLinus Torvalds  * modifying one set can't modify the other one by
1334a8342d2SLinus Torvalds  * mistake.
1344a8342d2SLinus Torvalds  */
1351da177e4SLinus Torvalds #define TASK_RUNNING		0
1361da177e4SLinus Torvalds #define TASK_INTERRUPTIBLE	1
1371da177e4SLinus Torvalds #define TASK_UNINTERRUPTIBLE	2
138f021a3c2SMatthew Wilcox #define __TASK_STOPPED		4
139f021a3c2SMatthew Wilcox #define __TASK_TRACED		8
1404a8342d2SLinus Torvalds /* in tsk->exit_state */
1414a8342d2SLinus Torvalds #define EXIT_ZOMBIE		16
1424a8342d2SLinus Torvalds #define EXIT_DEAD		32
1434a8342d2SLinus Torvalds /* in tsk->state again */
144af927232SMike Galbraith #define TASK_DEAD		64
145f021a3c2SMatthew Wilcox #define TASK_WAKEKILL		128
146e9c84311SPeter Zijlstra #define TASK_WAKING		256
147f2530dc7SThomas Gleixner #define TASK_PARKED		512
148f2530dc7SThomas Gleixner #define TASK_STATE_MAX		1024
149f021a3c2SMatthew Wilcox 
150f2530dc7SThomas Gleixner #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
15173342151SPeter Zijlstra 
152e1781538SPeter Zijlstra extern char ___assert_task_state[1 - 2*!!(
153e1781538SPeter Zijlstra 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
154f021a3c2SMatthew Wilcox 
155f021a3c2SMatthew Wilcox /* Convenience macros for the sake of set_task_state */
156f021a3c2SMatthew Wilcox #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
157f021a3c2SMatthew Wilcox #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
158f021a3c2SMatthew Wilcox #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
1591da177e4SLinus Torvalds 
16092a1f4bcSMatthew Wilcox /* Convenience macros for the sake of wake_up */
16192a1f4bcSMatthew Wilcox #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
162f021a3c2SMatthew Wilcox #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
16392a1f4bcSMatthew Wilcox 
16492a1f4bcSMatthew Wilcox /* get_task_state() */
16592a1f4bcSMatthew Wilcox #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
166f021a3c2SMatthew Wilcox 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
167f021a3c2SMatthew Wilcox 				 __TASK_TRACED)
16892a1f4bcSMatthew Wilcox 
169f021a3c2SMatthew Wilcox #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
170f021a3c2SMatthew Wilcox #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
1718f92054eSDavid Howells #define task_is_dead(task)	((task)->exit_state != 0)
17292a1f4bcSMatthew Wilcox #define task_is_stopped_or_traced(task)	\
173f021a3c2SMatthew Wilcox 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
17492a1f4bcSMatthew Wilcox #define task_contributes_to_load(task)	\
175e3c8ca83SNathan Lynch 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
176376fede8STejun Heo 				 (task->flags & PF_FROZEN) == 0)
1771da177e4SLinus Torvalds 
1781da177e4SLinus Torvalds #define __set_task_state(tsk, state_value)		\
1791da177e4SLinus Torvalds 	do { (tsk)->state = (state_value); } while (0)
1801da177e4SLinus Torvalds #define set_task_state(tsk, state_value)		\
1811da177e4SLinus Torvalds 	set_mb((tsk)->state, (state_value))
1821da177e4SLinus Torvalds 
183498d0c57SAndrew Morton /*
184498d0c57SAndrew Morton  * set_current_state() includes a barrier so that the write of current->state
185498d0c57SAndrew Morton  * is correctly serialised wrt the caller's subsequent test of whether to
186498d0c57SAndrew Morton  * actually sleep:
187498d0c57SAndrew Morton  *
188498d0c57SAndrew Morton  *	set_current_state(TASK_UNINTERRUPTIBLE);
189498d0c57SAndrew Morton  *	if (do_i_need_to_sleep())
190498d0c57SAndrew Morton  *		schedule();
191498d0c57SAndrew Morton  *
192498d0c57SAndrew Morton  * If the caller does not need such serialisation then use __set_current_state()
193498d0c57SAndrew Morton  */
1941da177e4SLinus Torvalds #define __set_current_state(state_value)			\
1951da177e4SLinus Torvalds 	do { current->state = (state_value); } while (0)
1961da177e4SLinus Torvalds #define set_current_state(state_value)		\
1971da177e4SLinus Torvalds 	set_mb(current->state, (state_value))
1981da177e4SLinus Torvalds 
1991da177e4SLinus Torvalds /* Task command name length */
2001da177e4SLinus Torvalds #define TASK_COMM_LEN 16
2011da177e4SLinus Torvalds 
2021da177e4SLinus Torvalds #include <linux/spinlock.h>
2031da177e4SLinus Torvalds 
2041da177e4SLinus Torvalds /*
2051da177e4SLinus Torvalds  * This serializes "schedule()" and also protects
2061da177e4SLinus Torvalds  * the run-queue from deletions/modifications (but
2071da177e4SLinus Torvalds  * _adding_ to the beginning of the run-queue has
2081da177e4SLinus Torvalds  * a separate lock).
2091da177e4SLinus Torvalds  */
2101da177e4SLinus Torvalds extern rwlock_t tasklist_lock;
2111da177e4SLinus Torvalds extern spinlock_t mmlist_lock;
2121da177e4SLinus Torvalds 
21336c8b586SIngo Molnar struct task_struct;
2141da177e4SLinus Torvalds 
215db1466b3SPaul E. McKenney #ifdef CONFIG_PROVE_RCU
216db1466b3SPaul E. McKenney extern int lockdep_tasklist_lock_is_held(void);
217db1466b3SPaul E. McKenney #endif /* #ifdef CONFIG_PROVE_RCU */
218db1466b3SPaul E. McKenney 
2191da177e4SLinus Torvalds extern void sched_init(void);
2201da177e4SLinus Torvalds extern void sched_init_smp(void);
2212d07b255SHarvey Harrison extern asmlinkage void schedule_tail(struct task_struct *prev);
22236c8b586SIngo Molnar extern void init_idle(struct task_struct *idle, int cpu);
2231df21055SIngo Molnar extern void init_idle_bootup_task(struct task_struct *idle);
2241da177e4SLinus Torvalds 
22589f19f04SAndrew Morton extern int runqueue_is_locked(int cpu);
226017730c1SIngo Molnar 
2273451d024SFrederic Weisbecker #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
228c1cc017cSAlex Shi extern void nohz_balance_enter_idle(int cpu);
22969e1e811SSuresh Siddha extern void set_cpu_sd_state_idle(void);
23083cd4fe2SVenkatesh Pallipadi extern int get_nohz_timer_target(void);
23146cb4b7cSSiddha, Suresh B #else
232c1cc017cSAlex Shi static inline void nohz_balance_enter_idle(int cpu) { }
233fdaabd80SPeter Zijlstra static inline void set_cpu_sd_state_idle(void) { }
23446cb4b7cSSiddha, Suresh B #endif
2351da177e4SLinus Torvalds 
236e59e2ae2SIngo Molnar /*
23739bc89fdSIngo Molnar  * Only dump TASK_* tasks. (0 for all tasks)
238e59e2ae2SIngo Molnar  */
239e59e2ae2SIngo Molnar extern void show_state_filter(unsigned long state_filter);
240e59e2ae2SIngo Molnar 
241e59e2ae2SIngo Molnar static inline void show_state(void)
242e59e2ae2SIngo Molnar {
24339bc89fdSIngo Molnar 	show_state_filter(0);
244e59e2ae2SIngo Molnar }
245e59e2ae2SIngo Molnar 
2461da177e4SLinus Torvalds extern void show_regs(struct pt_regs *);
2471da177e4SLinus Torvalds 
2481da177e4SLinus Torvalds /*
2491da177e4SLinus Torvalds  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
2501da177e4SLinus Torvalds  * task), SP is the stack pointer of the first frame that should be shown in the back
2511da177e4SLinus Torvalds  * trace (or NULL if the entire call-chain of the task should be shown).
2521da177e4SLinus Torvalds  */
2531da177e4SLinus Torvalds extern void show_stack(struct task_struct *task, unsigned long *sp);
2541da177e4SLinus Torvalds 
2551da177e4SLinus Torvalds void io_schedule(void);
2561da177e4SLinus Torvalds long io_schedule_timeout(long timeout);
2571da177e4SLinus Torvalds 
2581da177e4SLinus Torvalds extern void cpu_init (void);
2591da177e4SLinus Torvalds extern void trap_init(void);
2601da177e4SLinus Torvalds extern void update_process_times(int user);
2611da177e4SLinus Torvalds extern void scheduler_tick(void);
2621da177e4SLinus Torvalds 
26382a1fcb9SIngo Molnar extern void sched_show_task(struct task_struct *p);
26482a1fcb9SIngo Molnar 
26519cc36c0SFrederic Weisbecker #ifdef CONFIG_LOCKUP_DETECTOR
2668446f1d3SIngo Molnar extern void touch_softlockup_watchdog(void);
267d6ad3e28SJason Wessel extern void touch_softlockup_watchdog_sync(void);
26804c9167fSJeremy Fitzhardinge extern void touch_all_softlockup_watchdogs(void);
269332fbdbcSDon Zickus extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
2708d65af78SAlexey Dobriyan 				  void __user *buffer,
271baf48f65SMandeep Singh Baines 				  size_t *lenp, loff_t *ppos);
2729c44bc03SIngo Molnar extern unsigned int  softlockup_panic;
273004417a6SPeter Zijlstra void lockup_detector_init(void);
2748446f1d3SIngo Molnar #else
2758446f1d3SIngo Molnar static inline void touch_softlockup_watchdog(void)
2768446f1d3SIngo Molnar {
2778446f1d3SIngo Molnar }
278d6ad3e28SJason Wessel static inline void touch_softlockup_watchdog_sync(void)
279d6ad3e28SJason Wessel {
280d6ad3e28SJason Wessel }
28104c9167fSJeremy Fitzhardinge static inline void touch_all_softlockup_watchdogs(void)
28204c9167fSJeremy Fitzhardinge {
28304c9167fSJeremy Fitzhardinge }
284004417a6SPeter Zijlstra static inline void lockup_detector_init(void)
285004417a6SPeter Zijlstra {
286004417a6SPeter Zijlstra }
2878446f1d3SIngo Molnar #endif
2888446f1d3SIngo Molnar 
2891da177e4SLinus Torvalds /* Attach to any functions which should be ignored in wchan output. */
2901da177e4SLinus Torvalds #define __sched		__attribute__((__section__(".sched.text")))
291deaf2227SIngo Molnar 
292deaf2227SIngo Molnar /* Linker adds these: start and end of __sched functions */
293deaf2227SIngo Molnar extern char __sched_text_start[], __sched_text_end[];
294deaf2227SIngo Molnar 
2951da177e4SLinus Torvalds /* Is this address in the __sched functions? */
2961da177e4SLinus Torvalds extern int in_sched_functions(unsigned long addr);
2971da177e4SLinus Torvalds 
2981da177e4SLinus Torvalds #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
299b3c97528SHarvey Harrison extern signed long schedule_timeout(signed long timeout);
30064ed93a2SNishanth Aravamudan extern signed long schedule_timeout_interruptible(signed long timeout);
301294d5cc2SMatthew Wilcox extern signed long schedule_timeout_killable(signed long timeout);
30264ed93a2SNishanth Aravamudan extern signed long schedule_timeout_uninterruptible(signed long timeout);
3031da177e4SLinus Torvalds asmlinkage void schedule(void);
304c5491ea7SThomas Gleixner extern void schedule_preempt_disabled(void);
3051da177e4SLinus Torvalds 
306ab516013SSerge E. Hallyn struct nsproxy;
307acce292cSCedric Le Goater struct user_namespace;
3081da177e4SLinus Torvalds 
309efc1a3b1SDavid Howells #ifdef CONFIG_MMU
310efc1a3b1SDavid Howells extern void arch_pick_mmap_layout(struct mm_struct *mm);
3111da177e4SLinus Torvalds extern unsigned long
3121da177e4SLinus Torvalds arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
3131da177e4SLinus Torvalds 		       unsigned long, unsigned long);
3141da177e4SLinus Torvalds extern unsigned long
3151da177e4SLinus Torvalds arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
3161da177e4SLinus Torvalds 			  unsigned long len, unsigned long pgoff,
3171da177e4SLinus Torvalds 			  unsigned long flags);
318efc1a3b1SDavid Howells #else
319efc1a3b1SDavid Howells static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
320efc1a3b1SDavid Howells #endif
3211da177e4SLinus Torvalds 
322901608d9SOleg Nesterov 
3236c5d5238SKawai, Hidehiro extern void set_dumpable(struct mm_struct *mm, int value);
3246c5d5238SKawai, Hidehiro extern int get_dumpable(struct mm_struct *mm);
3256c5d5238SKawai, Hidehiro 
3266c5d5238SKawai, Hidehiro /* mm flags */
3273cb4a0bbSKawai, Hidehiro /* dumpable bits */
3286c5d5238SKawai, Hidehiro #define MMF_DUMPABLE      0  /* core dump is permitted */
3296c5d5238SKawai, Hidehiro #define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
330f8af4da3SHugh Dickins 
3313cb4a0bbSKawai, Hidehiro #define MMF_DUMPABLE_BITS 2
332f8af4da3SHugh Dickins #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
3333cb4a0bbSKawai, Hidehiro 
3343cb4a0bbSKawai, Hidehiro /* coredump filter bits */
3353cb4a0bbSKawai, Hidehiro #define MMF_DUMP_ANON_PRIVATE	2
3363cb4a0bbSKawai, Hidehiro #define MMF_DUMP_ANON_SHARED	3
3373cb4a0bbSKawai, Hidehiro #define MMF_DUMP_MAPPED_PRIVATE	4
3383cb4a0bbSKawai, Hidehiro #define MMF_DUMP_MAPPED_SHARED	5
33982df3973SRoland McGrath #define MMF_DUMP_ELF_HEADERS	6
340e575f111SKOSAKI Motohiro #define MMF_DUMP_HUGETLB_PRIVATE 7
341e575f111SKOSAKI Motohiro #define MMF_DUMP_HUGETLB_SHARED  8
342f8af4da3SHugh Dickins 
3433cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
344e575f111SKOSAKI Motohiro #define MMF_DUMP_FILTER_BITS	7
3453cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_MASK \
3463cb4a0bbSKawai, Hidehiro 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
3473cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_DEFAULT \
348e575f111SKOSAKI Motohiro 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
349656eb2cdSRoland McGrath 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
350656eb2cdSRoland McGrath 
351656eb2cdSRoland McGrath #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
352656eb2cdSRoland McGrath # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
353656eb2cdSRoland McGrath #else
354656eb2cdSRoland McGrath # define MMF_DUMP_MASK_DEFAULT_ELF	0
355656eb2cdSRoland McGrath #endif
356f8af4da3SHugh Dickins 					/* leave room for more dump flags */
357f8af4da3SHugh Dickins #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
358ba76149fSAndrea Arcangeli #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
359bafb282dSKonstantin Khlebnikov #define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
360f8af4da3SHugh Dickins 
3619f68f672SOleg Nesterov #define MMF_HAS_UPROBES		19	/* has uprobes */
3629f68f672SOleg Nesterov #define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
363f8ac4ec9SOleg Nesterov 
364f8af4da3SHugh Dickins #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
3656c5d5238SKawai, Hidehiro 
3661da177e4SLinus Torvalds struct sighand_struct {
3671da177e4SLinus Torvalds 	atomic_t		count;
3681da177e4SLinus Torvalds 	struct k_sigaction	action[_NSIG];
3691da177e4SLinus Torvalds 	spinlock_t		siglock;
370b8fceee1SDavide Libenzi 	wait_queue_head_t	signalfd_wqh;
3711da177e4SLinus Torvalds };
3721da177e4SLinus Torvalds 
3730e464814SKaiGai Kohei struct pacct_struct {
374f6ec29a4SKaiGai Kohei 	int			ac_flag;
375f6ec29a4SKaiGai Kohei 	long			ac_exitcode;
3760e464814SKaiGai Kohei 	unsigned long		ac_mem;
37777787bfbSKaiGai Kohei 	cputime_t		ac_utime, ac_stime;
37877787bfbSKaiGai Kohei 	unsigned long		ac_minflt, ac_majflt;
3790e464814SKaiGai Kohei };
3800e464814SKaiGai Kohei 
38142c4ab41SStanislaw Gruszka struct cpu_itimer {
38242c4ab41SStanislaw Gruszka 	cputime_t expires;
38342c4ab41SStanislaw Gruszka 	cputime_t incr;
3848356b5f9SStanislaw Gruszka 	u32 error;
3858356b5f9SStanislaw Gruszka 	u32 incr_error;
38642c4ab41SStanislaw Gruszka };
38742c4ab41SStanislaw Gruszka 
388f06febc9SFrank Mayhar /**
389d37f761dSFrederic Weisbecker  * struct cputime - snaphsot of system and user cputime
390d37f761dSFrederic Weisbecker  * @utime: time spent in user mode
391d37f761dSFrederic Weisbecker  * @stime: time spent in system mode
392d37f761dSFrederic Weisbecker  *
393d37f761dSFrederic Weisbecker  * Gathers a generic snapshot of user and system time.
394d37f761dSFrederic Weisbecker  */
395d37f761dSFrederic Weisbecker struct cputime {
396d37f761dSFrederic Weisbecker 	cputime_t utime;
397d37f761dSFrederic Weisbecker 	cputime_t stime;
398d37f761dSFrederic Weisbecker };
399d37f761dSFrederic Weisbecker 
400d37f761dSFrederic Weisbecker /**
401f06febc9SFrank Mayhar  * struct task_cputime - collected CPU time counts
402f06febc9SFrank Mayhar  * @utime:		time spent in user mode, in &cputime_t units
403f06febc9SFrank Mayhar  * @stime:		time spent in kernel mode, in &cputime_t units
404f06febc9SFrank Mayhar  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
405f06febc9SFrank Mayhar  *
406d37f761dSFrederic Weisbecker  * This is an extension of struct cputime that includes the total runtime
407d37f761dSFrederic Weisbecker  * spent by the task from the scheduler point of view.
408d37f761dSFrederic Weisbecker  *
409d37f761dSFrederic Weisbecker  * As a result, this structure groups together three kinds of CPU time
410d37f761dSFrederic Weisbecker  * that are tracked for threads and thread groups.  Most things considering
411f06febc9SFrank Mayhar  * CPU time want to group these counts together and treat all three
412f06febc9SFrank Mayhar  * of them in parallel.
413f06febc9SFrank Mayhar  */
414f06febc9SFrank Mayhar struct task_cputime {
415f06febc9SFrank Mayhar 	cputime_t utime;
416f06febc9SFrank Mayhar 	cputime_t stime;
417f06febc9SFrank Mayhar 	unsigned long long sum_exec_runtime;
418f06febc9SFrank Mayhar };
419f06febc9SFrank Mayhar /* Alternate field names when used to cache expirations. */
420f06febc9SFrank Mayhar #define prof_exp	stime
421f06febc9SFrank Mayhar #define virt_exp	utime
422f06febc9SFrank Mayhar #define sched_exp	sum_exec_runtime
423f06febc9SFrank Mayhar 
4244cd4c1b4SPeter Zijlstra #define INIT_CPUTIME	\
4254cd4c1b4SPeter Zijlstra 	(struct task_cputime) {					\
42664861634SMartin Schwidefsky 		.utime = 0,					\
42764861634SMartin Schwidefsky 		.stime = 0,					\
4284cd4c1b4SPeter Zijlstra 		.sum_exec_runtime = 0,				\
4294cd4c1b4SPeter Zijlstra 	}
4304cd4c1b4SPeter Zijlstra 
431a233f112SPeter Zijlstra #define PREEMPT_ENABLED		(PREEMPT_NEED_RESCHED)
432a233f112SPeter Zijlstra 
433a233f112SPeter Zijlstra #ifdef CONFIG_PREEMPT_COUNT
434a233f112SPeter Zijlstra #define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
435a233f112SPeter Zijlstra #else
436a233f112SPeter Zijlstra #define PREEMPT_DISABLED	PREEMPT_ENABLED
437a233f112SPeter Zijlstra #endif
438a233f112SPeter Zijlstra 
439c99e6efeSPeter Zijlstra /*
440c99e6efeSPeter Zijlstra  * Disable preemption until the scheduler is running.
441c99e6efeSPeter Zijlstra  * Reset by start_kernel()->sched_init()->init_idle().
442d86ee480SPeter Zijlstra  *
443d86ee480SPeter Zijlstra  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
444d86ee480SPeter Zijlstra  * before the scheduler is active -- see should_resched().
445c99e6efeSPeter Zijlstra  */
446a233f112SPeter Zijlstra #define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
447c99e6efeSPeter Zijlstra 
448f06febc9SFrank Mayhar /**
4494cd4c1b4SPeter Zijlstra  * struct thread_group_cputimer - thread group interval timer counts
4504cd4c1b4SPeter Zijlstra  * @cputime:		thread group interval timers.
4514cd4c1b4SPeter Zijlstra  * @running:		non-zero when there are timers running and
4524cd4c1b4SPeter Zijlstra  * 			@cputime receives updates.
4534cd4c1b4SPeter Zijlstra  * @lock:		lock for fields in this struct.
454f06febc9SFrank Mayhar  *
455f06febc9SFrank Mayhar  * This structure contains the version of task_cputime, above, that is
4564cd4c1b4SPeter Zijlstra  * used for thread group CPU timer calculations.
457f06febc9SFrank Mayhar  */
4584cd4c1b4SPeter Zijlstra struct thread_group_cputimer {
4594cd4c1b4SPeter Zijlstra 	struct task_cputime cputime;
4604cd4c1b4SPeter Zijlstra 	int running;
461ee30a7b2SThomas Gleixner 	raw_spinlock_t lock;
462f06febc9SFrank Mayhar };
463f06febc9SFrank Mayhar 
4644714d1d3SBen Blum #include <linux/rwsem.h>
4655091faa4SMike Galbraith struct autogroup;
4665091faa4SMike Galbraith 
4671da177e4SLinus Torvalds /*
468e815f0a8SJonathan Neuschäfer  * NOTE! "signal_struct" does not have its own
4691da177e4SLinus Torvalds  * locking, because a shared signal_struct always
4701da177e4SLinus Torvalds  * implies a shared sighand_struct, so locking
4711da177e4SLinus Torvalds  * sighand_struct is always a proper superset of
4721da177e4SLinus Torvalds  * the locking of signal_struct.
4731da177e4SLinus Torvalds  */
4741da177e4SLinus Torvalds struct signal_struct {
475ea6d290cSOleg Nesterov 	atomic_t		sigcnt;
4761da177e4SLinus Torvalds 	atomic_t		live;
477b3ac022cSOleg Nesterov 	int			nr_threads;
4781da177e4SLinus Torvalds 
4791da177e4SLinus Torvalds 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
4801da177e4SLinus Torvalds 
4811da177e4SLinus Torvalds 	/* current thread group signal load-balancing target: */
48236c8b586SIngo Molnar 	struct task_struct	*curr_target;
4831da177e4SLinus Torvalds 
4841da177e4SLinus Torvalds 	/* shared signal handling: */
4851da177e4SLinus Torvalds 	struct sigpending	shared_pending;
4861da177e4SLinus Torvalds 
4871da177e4SLinus Torvalds 	/* thread group exit support */
4881da177e4SLinus Torvalds 	int			group_exit_code;
4891da177e4SLinus Torvalds 	/* overloaded:
4901da177e4SLinus Torvalds 	 * - notify group_exit_task when ->count is equal to notify_count
4911da177e4SLinus Torvalds 	 * - everyone except group_exit_task is stopped during signal delivery
4921da177e4SLinus Torvalds 	 *   of fatal signals, group_exit_task processes the signal.
4931da177e4SLinus Torvalds 	 */
4941da177e4SLinus Torvalds 	int			notify_count;
49507dd20e0SRichard Kennedy 	struct task_struct	*group_exit_task;
4961da177e4SLinus Torvalds 
4971da177e4SLinus Torvalds 	/* thread group stop support, overloads group_exit_code too */
4981da177e4SLinus Torvalds 	int			group_stop_count;
4991da177e4SLinus Torvalds 	unsigned int		flags; /* see SIGNAL_* flags below */
5001da177e4SLinus Torvalds 
501ebec18a6SLennart Poettering 	/*
502ebec18a6SLennart Poettering 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
503ebec18a6SLennart Poettering 	 * manager, to re-parent orphan (double-forking) child processes
504ebec18a6SLennart Poettering 	 * to this process instead of 'init'. The service manager is
505ebec18a6SLennart Poettering 	 * able to receive SIGCHLD signals and is able to investigate
506ebec18a6SLennart Poettering 	 * the process until it calls wait(). All children of this
507ebec18a6SLennart Poettering 	 * process will inherit a flag if they should look for a
508ebec18a6SLennart Poettering 	 * child_subreaper process at exit.
509ebec18a6SLennart Poettering 	 */
510ebec18a6SLennart Poettering 	unsigned int		is_child_subreaper:1;
511ebec18a6SLennart Poettering 	unsigned int		has_child_subreaper:1;
512ebec18a6SLennart Poettering 
5131da177e4SLinus Torvalds 	/* POSIX.1b Interval Timers */
5145ed67f05SPavel Emelyanov 	int			posix_timer_id;
5151da177e4SLinus Torvalds 	struct list_head	posix_timers;
5161da177e4SLinus Torvalds 
5171da177e4SLinus Torvalds 	/* ITIMER_REAL timer for the process */
5182ff678b8SThomas Gleixner 	struct hrtimer real_timer;
519fea9d175SOleg Nesterov 	struct pid *leader_pid;
5202ff678b8SThomas Gleixner 	ktime_t it_real_incr;
5211da177e4SLinus Torvalds 
52242c4ab41SStanislaw Gruszka 	/*
52342c4ab41SStanislaw Gruszka 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
52442c4ab41SStanislaw Gruszka 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
52542c4ab41SStanislaw Gruszka 	 * values are defined to 0 and 1 respectively
52642c4ab41SStanislaw Gruszka 	 */
52742c4ab41SStanislaw Gruszka 	struct cpu_itimer it[2];
5281da177e4SLinus Torvalds 
529f06febc9SFrank Mayhar 	/*
5304cd4c1b4SPeter Zijlstra 	 * Thread group totals for process CPU timers.
5314cd4c1b4SPeter Zijlstra 	 * See thread_group_cputimer(), et al, for details.
532f06febc9SFrank Mayhar 	 */
5334cd4c1b4SPeter Zijlstra 	struct thread_group_cputimer cputimer;
534f06febc9SFrank Mayhar 
535f06febc9SFrank Mayhar 	/* Earliest-expiration cache. */
536f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
537f06febc9SFrank Mayhar 
538f06febc9SFrank Mayhar 	struct list_head cpu_timers[3];
539f06febc9SFrank Mayhar 
540ab521dc0SEric W. Biederman 	struct pid *tty_old_pgrp;
5411ec320afSCedric Le Goater 
5421da177e4SLinus Torvalds 	/* boolean value for session group leader */
5431da177e4SLinus Torvalds 	int leader;
5441da177e4SLinus Torvalds 
5451da177e4SLinus Torvalds 	struct tty_struct *tty; /* NULL if no tty */
5461da177e4SLinus Torvalds 
5475091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
5485091faa4SMike Galbraith 	struct autogroup *autogroup;
5495091faa4SMike Galbraith #endif
5501da177e4SLinus Torvalds 	/*
5511da177e4SLinus Torvalds 	 * Cumulative resource counters for dead threads in the group,
5521da177e4SLinus Torvalds 	 * and for reaped dead child processes forked by this group.
5531da177e4SLinus Torvalds 	 * Live threads maintain their own counters and add to these
5541da177e4SLinus Torvalds 	 * in __exit_signal, except for the group leader.
5551da177e4SLinus Torvalds 	 */
55632bd671dSPeter Zijlstra 	cputime_t utime, stime, cutime, cstime;
5579ac52315SLaurent Vivier 	cputime_t gtime;
5589ac52315SLaurent Vivier 	cputime_t cgtime;
5599fbc42eaSFrederic Weisbecker #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
560d37f761dSFrederic Weisbecker 	struct cputime prev_cputime;
5610cf55e1eSHidetoshi Seto #endif
5621da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
5631da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
5646eaeeabaSEric Dumazet 	unsigned long inblock, oublock, cinblock, coublock;
5651f10206cSJiri Pirko 	unsigned long maxrss, cmaxrss;
566940389b8SAndrea Righi 	struct task_io_accounting ioac;
5671da177e4SLinus Torvalds 
5681da177e4SLinus Torvalds 	/*
56932bd671dSPeter Zijlstra 	 * Cumulative ns of schedule CPU time fo dead threads in the
57032bd671dSPeter Zijlstra 	 * group, not including a zombie group leader, (This only differs
57132bd671dSPeter Zijlstra 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
57232bd671dSPeter Zijlstra 	 * other than jiffies.)
57332bd671dSPeter Zijlstra 	 */
57432bd671dSPeter Zijlstra 	unsigned long long sum_sched_runtime;
57532bd671dSPeter Zijlstra 
57632bd671dSPeter Zijlstra 	/*
5771da177e4SLinus Torvalds 	 * We don't bother to synchronize most readers of this at all,
5781da177e4SLinus Torvalds 	 * because there is no reader checking a limit that actually needs
5791da177e4SLinus Torvalds 	 * to get both rlim_cur and rlim_max atomically, and either one
5801da177e4SLinus Torvalds 	 * alone is a single word that can safely be read normally.
5811da177e4SLinus Torvalds 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
5821da177e4SLinus Torvalds 	 * protect this instead of the siglock, because they really
5831da177e4SLinus Torvalds 	 * have no need to disable irqs.
5841da177e4SLinus Torvalds 	 */
5851da177e4SLinus Torvalds 	struct rlimit rlim[RLIM_NLIMITS];
5861da177e4SLinus Torvalds 
5870e464814SKaiGai Kohei #ifdef CONFIG_BSD_PROCESS_ACCT
5880e464814SKaiGai Kohei 	struct pacct_struct pacct;	/* per-process accounting information */
5890e464814SKaiGai Kohei #endif
590ad4ecbcbSShailabh Nagar #ifdef CONFIG_TASKSTATS
591ad4ecbcbSShailabh Nagar 	struct taskstats *stats;
592ad4ecbcbSShailabh Nagar #endif
593522ed776SMiloslav Trmac #ifdef CONFIG_AUDIT
594522ed776SMiloslav Trmac 	unsigned audit_tty;
59546e959eaSRichard Guy Briggs 	unsigned audit_tty_log_passwd;
596522ed776SMiloslav Trmac 	struct tty_audit_buf *tty_audit_buf;
597522ed776SMiloslav Trmac #endif
5984714d1d3SBen Blum #ifdef CONFIG_CGROUPS
5994714d1d3SBen Blum 	/*
60077e4ef99STejun Heo 	 * group_rwsem prevents new tasks from entering the threadgroup and
60177e4ef99STejun Heo 	 * member tasks from exiting,a more specifically, setting of
60277e4ef99STejun Heo 	 * PF_EXITING.  fork and exit paths are protected with this rwsem
60377e4ef99STejun Heo 	 * using threadgroup_change_begin/end().  Users which require
60477e4ef99STejun Heo 	 * threadgroup to remain stable should use threadgroup_[un]lock()
60577e4ef99STejun Heo 	 * which also takes care of exec path.  Currently, cgroup is the
60677e4ef99STejun Heo 	 * only user.
6074714d1d3SBen Blum 	 */
608257058aeSTejun Heo 	struct rw_semaphore group_rwsem;
6094714d1d3SBen Blum #endif
61028b83c51SKOSAKI Motohiro 
611e1e12d2fSDavid Rientjes 	oom_flags_t oom_flags;
612a9c58b90SDavid Rientjes 	short oom_score_adj;		/* OOM kill score adjustment */
613a9c58b90SDavid Rientjes 	short oom_score_adj_min;	/* OOM kill score adjustment min value.
614dabb16f6SMandeep Singh Baines 					 * Only settable by CAP_SYS_RESOURCE. */
6159b1bf12dSKOSAKI Motohiro 
6169b1bf12dSKOSAKI Motohiro 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
6179b1bf12dSKOSAKI Motohiro 					 * credential calculations
6189b1bf12dSKOSAKI Motohiro 					 * (notably. ptrace) */
6191da177e4SLinus Torvalds };
6201da177e4SLinus Torvalds 
6211da177e4SLinus Torvalds /*
6221da177e4SLinus Torvalds  * Bits in flags field of signal_struct.
6231da177e4SLinus Torvalds  */
6241da177e4SLinus Torvalds #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
625ee77f075SOleg Nesterov #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
626ee77f075SOleg Nesterov #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
627403bad72SOleg Nesterov #define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
628e4420551SOleg Nesterov /*
629e4420551SOleg Nesterov  * Pending notifications to parent.
630e4420551SOleg Nesterov  */
631e4420551SOleg Nesterov #define SIGNAL_CLD_STOPPED	0x00000010
632e4420551SOleg Nesterov #define SIGNAL_CLD_CONTINUED	0x00000020
633e4420551SOleg Nesterov #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
6341da177e4SLinus Torvalds 
635fae5fa44SOleg Nesterov #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
636fae5fa44SOleg Nesterov 
637ed5d2cacSOleg Nesterov /* If true, all threads except ->group_exit_task have pending SIGKILL */
638ed5d2cacSOleg Nesterov static inline int signal_group_exit(const struct signal_struct *sig)
639ed5d2cacSOleg Nesterov {
640ed5d2cacSOleg Nesterov 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
641ed5d2cacSOleg Nesterov 		(sig->group_exit_task != NULL);
642ed5d2cacSOleg Nesterov }
643ed5d2cacSOleg Nesterov 
6441da177e4SLinus Torvalds /*
6451da177e4SLinus Torvalds  * Some day this will be a full-fledged user tracking system..
6461da177e4SLinus Torvalds  */
6471da177e4SLinus Torvalds struct user_struct {
6481da177e4SLinus Torvalds 	atomic_t __count;	/* reference count */
6491da177e4SLinus Torvalds 	atomic_t processes;	/* How many processes does this user have? */
6501da177e4SLinus Torvalds 	atomic_t files;		/* How many open files does this user have? */
6511da177e4SLinus Torvalds 	atomic_t sigpending;	/* How many pending signals does this user have? */
6522d9048e2SAmy Griffis #ifdef CONFIG_INOTIFY_USER
6530eeca283SRobert Love 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
6540eeca283SRobert Love 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
6550eeca283SRobert Love #endif
6564afeff85SEric Paris #ifdef CONFIG_FANOTIFY
6574afeff85SEric Paris 	atomic_t fanotify_listeners;
6584afeff85SEric Paris #endif
6597ef9964eSDavide Libenzi #ifdef CONFIG_EPOLL
66052bd19f7SRobin Holt 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
6617ef9964eSDavide Libenzi #endif
662970a8645SAlexey Dobriyan #ifdef CONFIG_POSIX_MQUEUE
6631da177e4SLinus Torvalds 	/* protected by mq_lock	*/
6641da177e4SLinus Torvalds 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
665970a8645SAlexey Dobriyan #endif
6661da177e4SLinus Torvalds 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
6671da177e4SLinus Torvalds 
6681da177e4SLinus Torvalds #ifdef CONFIG_KEYS
6691da177e4SLinus Torvalds 	struct key *uid_keyring;	/* UID specific keyring */
6701da177e4SLinus Torvalds 	struct key *session_keyring;	/* UID's default session keyring */
6711da177e4SLinus Torvalds #endif
6721da177e4SLinus Torvalds 
6731da177e4SLinus Torvalds 	/* Hash table maintenance information */
674735de223SPavel Emelyanov 	struct hlist_node uidhash_node;
6757b44ab97SEric W. Biederman 	kuid_t uid;
67624e377a8SSrivatsa Vaddagiri 
677cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
678789f90fcSPeter Zijlstra 	atomic_long_t locked_vm;
679789f90fcSPeter Zijlstra #endif
6801da177e4SLinus Torvalds };
6811da177e4SLinus Torvalds 
682eb41d946SKay Sievers extern int uids_sysfs_init(void);
6835cb350baSDhaval Giani 
6847b44ab97SEric W. Biederman extern struct user_struct *find_user(kuid_t);
6851da177e4SLinus Torvalds 
6861da177e4SLinus Torvalds extern struct user_struct root_user;
6871da177e4SLinus Torvalds #define INIT_USER (&root_user)
6881da177e4SLinus Torvalds 
689b6dff3ecSDavid Howells 
6901da177e4SLinus Torvalds struct backing_dev_info;
6911da177e4SLinus Torvalds struct reclaim_state;
6921da177e4SLinus Torvalds 
69352f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
6941da177e4SLinus Torvalds struct sched_info {
6951da177e4SLinus Torvalds 	/* cumulative counters */
6962d72376bSIngo Molnar 	unsigned long pcount;	      /* # of times run on this cpu */
6979c2c4802SKen Chen 	unsigned long long run_delay; /* time spent waiting on a runqueue */
6981da177e4SLinus Torvalds 
6991da177e4SLinus Torvalds 	/* timestamps */
700172ba844SBalbir Singh 	unsigned long long last_arrival,/* when we last ran on a cpu */
7011da177e4SLinus Torvalds 			   last_queued;	/* when we were last queued to run */
7021da177e4SLinus Torvalds };
70352f17b6cSChandra Seetharaman #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
7041da177e4SLinus Torvalds 
705ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT
706ca74e92bSShailabh Nagar struct task_delay_info {
707ca74e92bSShailabh Nagar 	spinlock_t	lock;
708ca74e92bSShailabh Nagar 	unsigned int	flags;	/* Private per-task flags */
709ca74e92bSShailabh Nagar 
710ca74e92bSShailabh Nagar 	/* For each stat XXX, add following, aligned appropriately
711ca74e92bSShailabh Nagar 	 *
712ca74e92bSShailabh Nagar 	 * struct timespec XXX_start, XXX_end;
713ca74e92bSShailabh Nagar 	 * u64 XXX_delay;
714ca74e92bSShailabh Nagar 	 * u32 XXX_count;
715ca74e92bSShailabh Nagar 	 *
716ca74e92bSShailabh Nagar 	 * Atomicity of updates to XXX_delay, XXX_count protected by
717ca74e92bSShailabh Nagar 	 * single lock above (split into XXX_lock if contention is an issue).
718ca74e92bSShailabh Nagar 	 */
7190ff92245SShailabh Nagar 
7200ff92245SShailabh Nagar 	/*
7210ff92245SShailabh Nagar 	 * XXX_count is incremented on every XXX operation, the delay
7220ff92245SShailabh Nagar 	 * associated with the operation is added to XXX_delay.
7230ff92245SShailabh Nagar 	 * XXX_delay contains the accumulated delay time in nanoseconds.
7240ff92245SShailabh Nagar 	 */
7250ff92245SShailabh Nagar 	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
7260ff92245SShailabh Nagar 	u64 blkio_delay;	/* wait for sync block io completion */
7270ff92245SShailabh Nagar 	u64 swapin_delay;	/* wait for swapin block io completion */
7280ff92245SShailabh Nagar 	u32 blkio_count;	/* total count of the number of sync block */
7290ff92245SShailabh Nagar 				/* io operations performed */
7300ff92245SShailabh Nagar 	u32 swapin_count;	/* total count of the number of swapin block */
7310ff92245SShailabh Nagar 				/* io operations performed */
732873b4771SKeika Kobayashi 
733873b4771SKeika Kobayashi 	struct timespec freepages_start, freepages_end;
734873b4771SKeika Kobayashi 	u64 freepages_delay;	/* wait for memory reclaim */
735873b4771SKeika Kobayashi 	u32 freepages_count;	/* total count of memory reclaim */
736ca74e92bSShailabh Nagar };
73752f17b6cSChandra Seetharaman #endif	/* CONFIG_TASK_DELAY_ACCT */
73852f17b6cSChandra Seetharaman 
73952f17b6cSChandra Seetharaman static inline int sched_info_on(void)
74052f17b6cSChandra Seetharaman {
74152f17b6cSChandra Seetharaman #ifdef CONFIG_SCHEDSTATS
74252f17b6cSChandra Seetharaman 	return 1;
74352f17b6cSChandra Seetharaman #elif defined(CONFIG_TASK_DELAY_ACCT)
74452f17b6cSChandra Seetharaman 	extern int delayacct_on;
74552f17b6cSChandra Seetharaman 	return delayacct_on;
74652f17b6cSChandra Seetharaman #else
74752f17b6cSChandra Seetharaman 	return 0;
748ca74e92bSShailabh Nagar #endif
74952f17b6cSChandra Seetharaman }
750ca74e92bSShailabh Nagar 
751d15bcfdbSIngo Molnar enum cpu_idle_type {
752d15bcfdbSIngo Molnar 	CPU_IDLE,
753d15bcfdbSIngo Molnar 	CPU_NOT_IDLE,
754d15bcfdbSIngo Molnar 	CPU_NEWLY_IDLE,
755d15bcfdbSIngo Molnar 	CPU_MAX_IDLE_TYPES
7561da177e4SLinus Torvalds };
7571da177e4SLinus Torvalds 
7581da177e4SLinus Torvalds /*
7591399fa78SNikhil Rao  * Increase resolution of cpu_power calculations
7601399fa78SNikhil Rao  */
7611399fa78SNikhil Rao #define SCHED_POWER_SHIFT	10
7621399fa78SNikhil Rao #define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
7631da177e4SLinus Torvalds 
7641399fa78SNikhil Rao /*
7651399fa78SNikhil Rao  * sched-domains (multiprocessor balancing) declarations:
7661399fa78SNikhil Rao  */
7672dd73a4fSPeter Williams #ifdef CONFIG_SMP
768b5d978e0SPeter Zijlstra #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
769b5d978e0SPeter Zijlstra #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
770b5d978e0SPeter Zijlstra #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
771b5d978e0SPeter Zijlstra #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
772c88d5910SPeter Zijlstra #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
773b5d978e0SPeter Zijlstra #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
774b5d978e0SPeter Zijlstra #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
775b5d978e0SPeter Zijlstra #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
776b5d978e0SPeter Zijlstra #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
777532cb4c4SMichael Neuling #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
778b5d978e0SPeter Zijlstra #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
779e3589f6cSPeter Zijlstra #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
7805c45bf27SSiddha, Suresh B 
781532cb4c4SMichael Neuling extern int __weak arch_sd_sibiling_asym_packing(void);
782532cb4c4SMichael Neuling 
7831d3504fcSHidetoshi Seto struct sched_domain_attr {
7841d3504fcSHidetoshi Seto 	int relax_domain_level;
7851d3504fcSHidetoshi Seto };
7861d3504fcSHidetoshi Seto 
7871d3504fcSHidetoshi Seto #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
7881d3504fcSHidetoshi Seto 	.relax_domain_level = -1,			\
7891d3504fcSHidetoshi Seto }
7901d3504fcSHidetoshi Seto 
79160495e77SPeter Zijlstra extern int sched_domain_level_max;
79260495e77SPeter Zijlstra 
7935e6521eaSLi Zefan struct sched_group;
7945e6521eaSLi Zefan 
7951da177e4SLinus Torvalds struct sched_domain {
7961da177e4SLinus Torvalds 	/* These fields must be setup */
7971da177e4SLinus Torvalds 	struct sched_domain *parent;	/* top domain must be null terminated */
7981a848870SSiddha, Suresh B 	struct sched_domain *child;	/* bottom domain must be null terminated */
7991da177e4SLinus Torvalds 	struct sched_group *groups;	/* the balancing groups of the domain */
8001da177e4SLinus Torvalds 	unsigned long min_interval;	/* Minimum balance interval ms */
8011da177e4SLinus Torvalds 	unsigned long max_interval;	/* Maximum balance interval ms */
8021da177e4SLinus Torvalds 	unsigned int busy_factor;	/* less balancing by factor if busy */
8031da177e4SLinus Torvalds 	unsigned int imbalance_pct;	/* No balance until over watermark */
8041da177e4SLinus Torvalds 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
8057897986bSNick Piggin 	unsigned int busy_idx;
8067897986bSNick Piggin 	unsigned int idle_idx;
8077897986bSNick Piggin 	unsigned int newidle_idx;
8087897986bSNick Piggin 	unsigned int wake_idx;
809147cbb4bSNick Piggin 	unsigned int forkexec_idx;
810a52bfd73SPeter Zijlstra 	unsigned int smt_gain;
81125f55d9dSVincent Guittot 
81225f55d9dSVincent Guittot 	int nohz_idle;			/* NOHZ IDLE status */
8131da177e4SLinus Torvalds 	int flags;			/* See SD_* */
81460495e77SPeter Zijlstra 	int level;
8151da177e4SLinus Torvalds 
8161da177e4SLinus Torvalds 	/* Runtime fields. */
8171da177e4SLinus Torvalds 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
8181da177e4SLinus Torvalds 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
8191da177e4SLinus Torvalds 	unsigned int nr_balance_failed; /* initialise to 0 */
8201da177e4SLinus Torvalds 
8212398f2c6SPeter Zijlstra 	u64 last_update;
822f48627e6SJason Low 
823f48627e6SJason Low 	/* idle_balance() stats */
8249bd721c5SJason Low 	u64 max_newidle_lb_cost;
825f48627e6SJason Low 	unsigned long next_decay_max_lb_cost;
8262398f2c6SPeter Zijlstra 
8271da177e4SLinus Torvalds #ifdef CONFIG_SCHEDSTATS
8281da177e4SLinus Torvalds 	/* load_balance() stats */
829480b9434SKen Chen 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
830480b9434SKen Chen 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
831480b9434SKen Chen 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
832480b9434SKen Chen 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
833480b9434SKen Chen 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
834480b9434SKen Chen 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
835480b9434SKen Chen 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
836480b9434SKen Chen 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
8371da177e4SLinus Torvalds 
8381da177e4SLinus Torvalds 	/* Active load balancing */
839480b9434SKen Chen 	unsigned int alb_count;
840480b9434SKen Chen 	unsigned int alb_failed;
841480b9434SKen Chen 	unsigned int alb_pushed;
8421da177e4SLinus Torvalds 
84368767a0aSNick Piggin 	/* SD_BALANCE_EXEC stats */
844480b9434SKen Chen 	unsigned int sbe_count;
845480b9434SKen Chen 	unsigned int sbe_balanced;
846480b9434SKen Chen 	unsigned int sbe_pushed;
8471da177e4SLinus Torvalds 
84868767a0aSNick Piggin 	/* SD_BALANCE_FORK stats */
849480b9434SKen Chen 	unsigned int sbf_count;
850480b9434SKen Chen 	unsigned int sbf_balanced;
851480b9434SKen Chen 	unsigned int sbf_pushed;
85268767a0aSNick Piggin 
8531da177e4SLinus Torvalds 	/* try_to_wake_up() stats */
854480b9434SKen Chen 	unsigned int ttwu_wake_remote;
855480b9434SKen Chen 	unsigned int ttwu_move_affine;
856480b9434SKen Chen 	unsigned int ttwu_move_balance;
8571da177e4SLinus Torvalds #endif
858a5d8c348SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
859a5d8c348SIngo Molnar 	char *name;
860a5d8c348SIngo Molnar #endif
861dce840a0SPeter Zijlstra 	union {
862dce840a0SPeter Zijlstra 		void *private;		/* used during construction */
863dce840a0SPeter Zijlstra 		struct rcu_head rcu;	/* used during destruction */
864dce840a0SPeter Zijlstra 	};
8656c99e9adSRusty Russell 
866669c55e9SPeter Zijlstra 	unsigned int span_weight;
8674200efd9SIngo Molnar 	/*
8684200efd9SIngo Molnar 	 * Span of all CPUs in this domain.
8694200efd9SIngo Molnar 	 *
8704200efd9SIngo Molnar 	 * NOTE: this field is variable length. (Allocated dynamically
8714200efd9SIngo Molnar 	 * by attaching extra space to the end of the structure,
8724200efd9SIngo Molnar 	 * depending on how many CPUs the kernel has booted up with)
8734200efd9SIngo Molnar 	 */
8744200efd9SIngo Molnar 	unsigned long span[0];
8751da177e4SLinus Torvalds };
8761da177e4SLinus Torvalds 
877758b2cdcSRusty Russell static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
878758b2cdcSRusty Russell {
8796c99e9adSRusty Russell 	return to_cpumask(sd->span);
880758b2cdcSRusty Russell }
881758b2cdcSRusty Russell 
882acc3f5d7SRusty Russell extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
8831d3504fcSHidetoshi Seto 				    struct sched_domain_attr *dattr_new);
884029190c5SPaul Jackson 
885acc3f5d7SRusty Russell /* Allocate an array of sched domains, for partition_sched_domains(). */
886acc3f5d7SRusty Russell cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
887acc3f5d7SRusty Russell void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
888acc3f5d7SRusty Russell 
88939be3501SPeter Zijlstra bool cpus_share_cache(int this_cpu, int that_cpu);
89039be3501SPeter Zijlstra 
8911b427c15SIngo Molnar #else /* CONFIG_SMP */
8921da177e4SLinus Torvalds 
8931b427c15SIngo Molnar struct sched_domain_attr;
8941b427c15SIngo Molnar 
8951b427c15SIngo Molnar static inline void
896acc3f5d7SRusty Russell partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
8971b427c15SIngo Molnar 			struct sched_domain_attr *dattr_new)
898d02c7a8cSCon Kolivas {
899d02c7a8cSCon Kolivas }
90039be3501SPeter Zijlstra 
90139be3501SPeter Zijlstra static inline bool cpus_share_cache(int this_cpu, int that_cpu)
90239be3501SPeter Zijlstra {
90339be3501SPeter Zijlstra 	return true;
90439be3501SPeter Zijlstra }
90539be3501SPeter Zijlstra 
9061b427c15SIngo Molnar #endif	/* !CONFIG_SMP */
9071da177e4SLinus Torvalds 
90847fe38fcSPeter Zijlstra 
9091da177e4SLinus Torvalds struct io_context;			/* See blkdev.h */
9101da177e4SLinus Torvalds 
9111da177e4SLinus Torvalds 
912383f2835SChen, Kenneth W #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
91336c8b586SIngo Molnar extern void prefetch_stack(struct task_struct *t);
914383f2835SChen, Kenneth W #else
915383f2835SChen, Kenneth W static inline void prefetch_stack(struct task_struct *t) { }
916383f2835SChen, Kenneth W #endif
9171da177e4SLinus Torvalds 
9181da177e4SLinus Torvalds struct audit_context;		/* See audit.c */
9191da177e4SLinus Torvalds struct mempolicy;
920b92ce558SJens Axboe struct pipe_inode_info;
9214865ecf1SSerge E. Hallyn struct uts_namespace;
9221da177e4SLinus Torvalds 
92320b8a59fSIngo Molnar struct load_weight {
92420b8a59fSIngo Molnar 	unsigned long weight, inv_weight;
92520b8a59fSIngo Molnar };
92620b8a59fSIngo Molnar 
9279d85f21cSPaul Turner struct sched_avg {
9289d85f21cSPaul Turner 	/*
9299d85f21cSPaul Turner 	 * These sums represent an infinite geometric series and so are bound
930239003eaSKamalesh Babulal 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
9319d85f21cSPaul Turner 	 * choices of y < 1-2^(-32)*1024.
9329d85f21cSPaul Turner 	 */
9339d85f21cSPaul Turner 	u32 runnable_avg_sum, runnable_avg_period;
9349d85f21cSPaul Turner 	u64 last_runnable_update;
9359ee474f5SPaul Turner 	s64 decay_count;
9362dac754eSPaul Turner 	unsigned long load_avg_contrib;
9379d85f21cSPaul Turner };
9389d85f21cSPaul Turner 
93994c18227SIngo Molnar #ifdef CONFIG_SCHEDSTATS
94041acab88SLucas De Marchi struct sched_statistics {
94194c18227SIngo Molnar 	u64			wait_start;
94294c18227SIngo Molnar 	u64			wait_max;
9436d082592SArjan van de Ven 	u64			wait_count;
9446d082592SArjan van de Ven 	u64			wait_sum;
9458f0dfc34SArjan van de Ven 	u64			iowait_count;
9468f0dfc34SArjan van de Ven 	u64			iowait_sum;
94794c18227SIngo Molnar 
94894c18227SIngo Molnar 	u64			sleep_start;
94920b8a59fSIngo Molnar 	u64			sleep_max;
95094c18227SIngo Molnar 	s64			sum_sleep_runtime;
95194c18227SIngo Molnar 
95294c18227SIngo Molnar 	u64			block_start;
95320b8a59fSIngo Molnar 	u64			block_max;
95420b8a59fSIngo Molnar 	u64			exec_max;
955eba1ed4bSIngo Molnar 	u64			slice_max;
956cc367732SIngo Molnar 
957cc367732SIngo Molnar 	u64			nr_migrations_cold;
958cc367732SIngo Molnar 	u64			nr_failed_migrations_affine;
959cc367732SIngo Molnar 	u64			nr_failed_migrations_running;
960cc367732SIngo Molnar 	u64			nr_failed_migrations_hot;
961cc367732SIngo Molnar 	u64			nr_forced_migrations;
962cc367732SIngo Molnar 
963cc367732SIngo Molnar 	u64			nr_wakeups;
964cc367732SIngo Molnar 	u64			nr_wakeups_sync;
965cc367732SIngo Molnar 	u64			nr_wakeups_migrate;
966cc367732SIngo Molnar 	u64			nr_wakeups_local;
967cc367732SIngo Molnar 	u64			nr_wakeups_remote;
968cc367732SIngo Molnar 	u64			nr_wakeups_affine;
969cc367732SIngo Molnar 	u64			nr_wakeups_affine_attempts;
970cc367732SIngo Molnar 	u64			nr_wakeups_passive;
971cc367732SIngo Molnar 	u64			nr_wakeups_idle;
97241acab88SLucas De Marchi };
97341acab88SLucas De Marchi #endif
97441acab88SLucas De Marchi 
97541acab88SLucas De Marchi struct sched_entity {
97641acab88SLucas De Marchi 	struct load_weight	load;		/* for load-balancing */
97741acab88SLucas De Marchi 	struct rb_node		run_node;
97841acab88SLucas De Marchi 	struct list_head	group_node;
97941acab88SLucas De Marchi 	unsigned int		on_rq;
98041acab88SLucas De Marchi 
98141acab88SLucas De Marchi 	u64			exec_start;
98241acab88SLucas De Marchi 	u64			sum_exec_runtime;
98341acab88SLucas De Marchi 	u64			vruntime;
98441acab88SLucas De Marchi 	u64			prev_sum_exec_runtime;
98541acab88SLucas De Marchi 
98641acab88SLucas De Marchi 	u64			nr_migrations;
98741acab88SLucas De Marchi 
98841acab88SLucas De Marchi #ifdef CONFIG_SCHEDSTATS
98941acab88SLucas De Marchi 	struct sched_statistics statistics;
99094c18227SIngo Molnar #endif
99194c18227SIngo Molnar 
99220b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
99320b8a59fSIngo Molnar 	struct sched_entity	*parent;
99420b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
99520b8a59fSIngo Molnar 	struct cfs_rq		*cfs_rq;
99620b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
99720b8a59fSIngo Molnar 	struct cfs_rq		*my_q;
99820b8a59fSIngo Molnar #endif
9998bd75c77SClark Williams 
1000141965c7SAlex Shi #ifdef CONFIG_SMP
1001f4e26b12SPaul Turner 	/* Per-entity load-tracking */
10029d85f21cSPaul Turner 	struct sched_avg	avg;
10039d85f21cSPaul Turner #endif
100420b8a59fSIngo Molnar };
100570b97a7fSIngo Molnar 
1006fa717060SPeter Zijlstra struct sched_rt_entity {
1007fa717060SPeter Zijlstra 	struct list_head run_list;
100878f2c7dbSPeter Zijlstra 	unsigned long timeout;
100957d2aa00SYing Xue 	unsigned long watchdog_stamp;
1010bee367edSRichard Kennedy 	unsigned int time_slice;
10116f505b16SPeter Zijlstra 
101258d6c2d7SPeter Zijlstra 	struct sched_rt_entity *back;
1013052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
10146f505b16SPeter Zijlstra 	struct sched_rt_entity	*parent;
10156f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
10166f505b16SPeter Zijlstra 	struct rt_rq		*rt_rq;
10176f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
10186f505b16SPeter Zijlstra 	struct rt_rq		*my_q;
10196f505b16SPeter Zijlstra #endif
1020fa717060SPeter Zijlstra };
1021fa717060SPeter Zijlstra 
10228bd75c77SClark Williams 
102386848966SPaul E. McKenney struct rcu_node;
102486848966SPaul E. McKenney 
10258dc85d54SPeter Zijlstra enum perf_event_task_context {
10268dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
10278dc85d54SPeter Zijlstra 	perf_hw_context = 0,
102889a1e187SPeter Zijlstra 	perf_sw_context,
10298dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
10308dc85d54SPeter Zijlstra };
10318dc85d54SPeter Zijlstra 
10321da177e4SLinus Torvalds struct task_struct {
10331da177e4SLinus Torvalds 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1034f7e4217bSRoman Zippel 	void *stack;
10351da177e4SLinus Torvalds 	atomic_t usage;
103697dc32cdSWilliam Cohen 	unsigned int flags;	/* per process flags, defined below */
103797dc32cdSWilliam Cohen 	unsigned int ptrace;
10381da177e4SLinus Torvalds 
10392dd73a4fSPeter Williams #ifdef CONFIG_SMP
1040fa14ff4aSPeter Zijlstra 	struct llist_node wake_entry;
10413ca7a440SPeter Zijlstra 	int on_cpu;
104262470419SMichael Wang 	struct task_struct *last_wakee;
104362470419SMichael Wang 	unsigned long wakee_flips;
104462470419SMichael Wang 	unsigned long wakee_flip_decay_ts;
10454866cde0SNick Piggin #endif
1046fd2f4419SPeter Zijlstra 	int on_rq;
104750e645a8SIngo Molnar 
1048b29739f9SIngo Molnar 	int prio, static_prio, normal_prio;
1049c7aceabaSRichard Kennedy 	unsigned int rt_priority;
10505522d5d5SIngo Molnar 	const struct sched_class *sched_class;
105120b8a59fSIngo Molnar 	struct sched_entity se;
1052fa717060SPeter Zijlstra 	struct sched_rt_entity rt;
10538323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
10548323f26cSPeter Zijlstra 	struct task_group *sched_task_group;
10558323f26cSPeter Zijlstra #endif
10561da177e4SLinus Torvalds 
1057e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
1058e107be36SAvi Kivity 	/* list of struct preempt_notifier: */
1059e107be36SAvi Kivity 	struct hlist_head preempt_notifiers;
1060e107be36SAvi Kivity #endif
1061e107be36SAvi Kivity 
106218796aa0SAlexey Dobriyan 	/*
106318796aa0SAlexey Dobriyan 	 * fpu_counter contains the number of consecutive context switches
106418796aa0SAlexey Dobriyan 	 * that the FPU is used. If this is over a threshold, the lazy fpu
106518796aa0SAlexey Dobriyan 	 * saving becomes unlazy to save the trap. This is an unsigned char
106618796aa0SAlexey Dobriyan 	 * so that after 256 times the counter wraps and the behavior turns
106718796aa0SAlexey Dobriyan 	 * lazy again; this to deal with bursty apps that only use FPU for
106818796aa0SAlexey Dobriyan 	 * a short time
106918796aa0SAlexey Dobriyan 	 */
107018796aa0SAlexey Dobriyan 	unsigned char fpu_counter;
10716c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
10722056a782SJens Axboe 	unsigned int btrace_seq;
10736c5c9341SAlexey Dobriyan #endif
10741da177e4SLinus Torvalds 
107597dc32cdSWilliam Cohen 	unsigned int policy;
107629baa747SPeter Zijlstra 	int nr_cpus_allowed;
10771da177e4SLinus Torvalds 	cpumask_t cpus_allowed;
10781da177e4SLinus Torvalds 
1079a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1080e260be67SPaul E. McKenney 	int rcu_read_lock_nesting;
1081f41d911fSPaul E. McKenney 	char rcu_read_unlock_special;
1082f41d911fSPaul E. McKenney 	struct list_head rcu_node_entry;
1083a57eb940SPaul E. McKenney #endif /* #ifdef CONFIG_PREEMPT_RCU */
1084a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1085a57eb940SPaul E. McKenney 	struct rcu_node *rcu_blocked_node;
1086f41d911fSPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
108724278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
108824278d14SPaul E. McKenney 	struct rt_mutex *rcu_boost_mutex;
108924278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1090e260be67SPaul E. McKenney 
109152f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
10921da177e4SLinus Torvalds 	struct sched_info sched_info;
10931da177e4SLinus Torvalds #endif
10941da177e4SLinus Torvalds 
10951da177e4SLinus Torvalds 	struct list_head tasks;
1096806c09a7SDario Faggioli #ifdef CONFIG_SMP
1097917b627dSGregory Haskins 	struct plist_node pushable_tasks;
1098806c09a7SDario Faggioli #endif
10991da177e4SLinus Torvalds 
11001da177e4SLinus Torvalds 	struct mm_struct *mm, *active_mm;
11014471a675SJiri Kosina #ifdef CONFIG_COMPAT_BRK
11024471a675SJiri Kosina 	unsigned brk_randomized:1;
11034471a675SJiri Kosina #endif
110434e55232SKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
110534e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat	rss_stat;
110634e55232SKAMEZAWA Hiroyuki #endif
11071da177e4SLinus Torvalds /* task state */
110897dc32cdSWilliam Cohen 	int exit_state;
11091da177e4SLinus Torvalds 	int exit_code, exit_signal;
11101da177e4SLinus Torvalds 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1111a8f072c1STejun Heo 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
11129b89f6baSAndrei Epure 
11139b89f6baSAndrei Epure 	/* Used for emulating ABI behavior of previous Linux versions */
111497dc32cdSWilliam Cohen 	unsigned int personality;
11159b89f6baSAndrei Epure 
11161da177e4SLinus Torvalds 	unsigned did_exec:1;
1117f9ce1f1cSKentaro Takeda 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1118f9ce1f1cSKentaro Takeda 				 * execve */
11198f0dfc34SArjan van de Ven 	unsigned in_iowait:1;
11208f0dfc34SArjan van de Ven 
1121259e5e6cSAndy Lutomirski 	/* task may not gain privileges */
1122259e5e6cSAndy Lutomirski 	unsigned no_new_privs:1;
1123ca94c442SLennart Poettering 
1124ca94c442SLennart Poettering 	/* Revert to default priority/policy when forking */
1125ca94c442SLennart Poettering 	unsigned sched_reset_on_fork:1;
1126a8e4f2eaSPeter Zijlstra 	unsigned sched_contributes_to_load:1;
1127ca94c442SLennart Poettering 
11281da177e4SLinus Torvalds 	pid_t pid;
11291da177e4SLinus Torvalds 	pid_t tgid;
11300a425405SArjan van de Ven 
11311314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
11320a425405SArjan van de Ven 	/* Canary value for the -fstack-protector gcc feature */
11330a425405SArjan van de Ven 	unsigned long stack_canary;
11341314562aSHiroshi Shimamoto #endif
11351da177e4SLinus Torvalds 	/*
11361da177e4SLinus Torvalds 	 * pointers to (original) parent process, youngest child, younger sibling,
11371da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
1138f470021aSRoland McGrath 	 * p->real_parent->pid)
11391da177e4SLinus Torvalds 	 */
1140abd63bc3SKees Cook 	struct task_struct __rcu *real_parent; /* real parent process */
1141abd63bc3SKees Cook 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
11421da177e4SLinus Torvalds 	/*
1143f470021aSRoland McGrath 	 * children/sibling forms the list of my natural children
11441da177e4SLinus Torvalds 	 */
11451da177e4SLinus Torvalds 	struct list_head children;	/* list of my children */
11461da177e4SLinus Torvalds 	struct list_head sibling;	/* linkage in my parent's children list */
11471da177e4SLinus Torvalds 	struct task_struct *group_leader;	/* threadgroup leader */
11481da177e4SLinus Torvalds 
1149f470021aSRoland McGrath 	/*
1150f470021aSRoland McGrath 	 * ptraced is the list of tasks this task is using ptrace on.
1151f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
1152f470021aSRoland McGrath 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1153f470021aSRoland McGrath 	 */
1154f470021aSRoland McGrath 	struct list_head ptraced;
1155f470021aSRoland McGrath 	struct list_head ptrace_entry;
1156f470021aSRoland McGrath 
11571da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
115892476d7fSEric W. Biederman 	struct pid_link pids[PIDTYPE_MAX];
115947e65328SOleg Nesterov 	struct list_head thread_group;
11601da177e4SLinus Torvalds 
11611da177e4SLinus Torvalds 	struct completion *vfork_done;		/* for vfork() */
11621da177e4SLinus Torvalds 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
11631da177e4SLinus Torvalds 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
11641da177e4SLinus Torvalds 
1165c66f08beSMichael Neuling 	cputime_t utime, stime, utimescaled, stimescaled;
11669ac52315SLaurent Vivier 	cputime_t gtime;
11679fbc42eaSFrederic Weisbecker #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1168d37f761dSFrederic Weisbecker 	struct cputime prev_cputime;
1169d99ca3b9SHidetoshi Seto #endif
11706a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
11716a61671bSFrederic Weisbecker 	seqlock_t vtime_seqlock;
11726a61671bSFrederic Weisbecker 	unsigned long long vtime_snap;
11736a61671bSFrederic Weisbecker 	enum {
11746a61671bSFrederic Weisbecker 		VTIME_SLEEPING = 0,
11756a61671bSFrederic Weisbecker 		VTIME_USER,
11766a61671bSFrederic Weisbecker 		VTIME_SYS,
11776a61671bSFrederic Weisbecker 	} vtime_snap_whence;
11786a61671bSFrederic Weisbecker #endif
11791da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw; /* context switch counts */
1180924b42d5STomas Janousek 	struct timespec start_time; 		/* monotonic time */
1181924b42d5STomas Janousek 	struct timespec real_start_time;	/* boot based time */
11821da177e4SLinus Torvalds /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
11831da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt;
11841da177e4SLinus Torvalds 
1185f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
11861da177e4SLinus Torvalds 	struct list_head cpu_timers[3];
11871da177e4SLinus Torvalds 
11881da177e4SLinus Torvalds /* process credentials */
11891b0ba1c9SArnd Bergmann 	const struct cred __rcu *real_cred; /* objective and real subjective task
11903b11a1deSDavid Howells 					 * credentials (COW) */
11911b0ba1c9SArnd Bergmann 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
11923b11a1deSDavid Howells 					 * credentials (COW) */
119336772092SPaolo 'Blaisorblade' Giarrusso 	char comm[TASK_COMM_LEN]; /* executable name excluding path
119436772092SPaolo 'Blaisorblade' Giarrusso 				     - access with [gs]et_task_comm (which lock
119536772092SPaolo 'Blaisorblade' Giarrusso 				       it with task_lock())
1196221af7f8SLinus Torvalds 				     - initialized normally by setup_new_exec */
11971da177e4SLinus Torvalds /* file system info */
11981da177e4SLinus Torvalds 	int link_count, total_link_count;
11993d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
12001da177e4SLinus Torvalds /* ipc stuff */
12011da177e4SLinus Torvalds 	struct sysv_sem sysvsem;
12023d5b6fccSAlexey Dobriyan #endif
1203e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
120482a1fcb9SIngo Molnar /* hung task detection */
120582a1fcb9SIngo Molnar 	unsigned long last_switch_count;
120682a1fcb9SIngo Molnar #endif
12071da177e4SLinus Torvalds /* CPU-specific state of this task */
12081da177e4SLinus Torvalds 	struct thread_struct thread;
12091da177e4SLinus Torvalds /* filesystem information */
12101da177e4SLinus Torvalds 	struct fs_struct *fs;
12111da177e4SLinus Torvalds /* open file information */
12121da177e4SLinus Torvalds 	struct files_struct *files;
12131651e14eSSerge E. Hallyn /* namespaces */
1214ab516013SSerge E. Hallyn 	struct nsproxy *nsproxy;
12151da177e4SLinus Torvalds /* signal handlers */
12161da177e4SLinus Torvalds 	struct signal_struct *signal;
12171da177e4SLinus Torvalds 	struct sighand_struct *sighand;
12181da177e4SLinus Torvalds 
12191da177e4SLinus Torvalds 	sigset_t blocked, real_blocked;
1220f3de272bSRoland McGrath 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
12211da177e4SLinus Torvalds 	struct sigpending pending;
12221da177e4SLinus Torvalds 
12231da177e4SLinus Torvalds 	unsigned long sas_ss_sp;
12241da177e4SLinus Torvalds 	size_t sas_ss_size;
12251da177e4SLinus Torvalds 	int (*notifier)(void *priv);
12261da177e4SLinus Torvalds 	void *notifier_data;
12271da177e4SLinus Torvalds 	sigset_t *notifier_mask;
122867d12145SAl Viro 	struct callback_head *task_works;
1229e73f8959SOleg Nesterov 
12301da177e4SLinus Torvalds 	struct audit_context *audit_context;
1231bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
1232e1760bd5SEric W. Biederman 	kuid_t loginuid;
12334746ec5bSEric Paris 	unsigned int sessionid;
1234bfef93a5SAl Viro #endif
1235932ecebbSWill Drewry 	struct seccomp seccomp;
12361da177e4SLinus Torvalds 
12371da177e4SLinus Torvalds /* Thread group tracking */
12381da177e4SLinus Torvalds    	u32 parent_exec_id;
12391da177e4SLinus Torvalds    	u32 self_exec_id;
124058568d2aSMiao Xie /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
124158568d2aSMiao Xie  * mempolicy */
12421da177e4SLinus Torvalds 	spinlock_t alloc_lock;
12431da177e4SLinus Torvalds 
1244b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
12451d615482SThomas Gleixner 	raw_spinlock_t pi_lock;
1246b29739f9SIngo Molnar 
124723f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
124823f78d4aSIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task */
124923f78d4aSIngo Molnar 	struct plist_head pi_waiters;
125023f78d4aSIngo Molnar 	/* Deadlock detection and priority inheritance handling */
125123f78d4aSIngo Molnar 	struct rt_mutex_waiter *pi_blocked_on;
125223f78d4aSIngo Molnar #endif
125323f78d4aSIngo Molnar 
1254408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
1255408894eeSIngo Molnar 	/* mutex deadlock detection */
1256408894eeSIngo Molnar 	struct mutex_waiter *blocked_on;
1257408894eeSIngo Molnar #endif
1258de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
1259de30a2b3SIngo Molnar 	unsigned int irq_events;
1260de30a2b3SIngo Molnar 	unsigned long hardirq_enable_ip;
1261de30a2b3SIngo Molnar 	unsigned long hardirq_disable_ip;
1262fa1452e8SHiroshi Shimamoto 	unsigned int hardirq_enable_event;
1263de30a2b3SIngo Molnar 	unsigned int hardirq_disable_event;
1264fa1452e8SHiroshi Shimamoto 	int hardirqs_enabled;
1265de30a2b3SIngo Molnar 	int hardirq_context;
1266fa1452e8SHiroshi Shimamoto 	unsigned long softirq_disable_ip;
1267fa1452e8SHiroshi Shimamoto 	unsigned long softirq_enable_ip;
1268fa1452e8SHiroshi Shimamoto 	unsigned int softirq_disable_event;
1269fa1452e8SHiroshi Shimamoto 	unsigned int softirq_enable_event;
1270fa1452e8SHiroshi Shimamoto 	int softirqs_enabled;
1271de30a2b3SIngo Molnar 	int softirq_context;
1272de30a2b3SIngo Molnar #endif
1273fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
1274bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
1275fbb9ce95SIngo Molnar 	u64 curr_chain_key;
1276fbb9ce95SIngo Molnar 	int lockdep_depth;
1277fbb9ce95SIngo Molnar 	unsigned int lockdep_recursion;
1278c7aceabaSRichard Kennedy 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1279cf40bd16SNick Piggin 	gfp_t lockdep_reclaim_gfp;
1280fbb9ce95SIngo Molnar #endif
1281408894eeSIngo Molnar 
12821da177e4SLinus Torvalds /* journalling filesystem info */
12831da177e4SLinus Torvalds 	void *journal_info;
12841da177e4SLinus Torvalds 
1285d89d8796SNeil Brown /* stacked block device info */
1286bddd87c7SAkinobu Mita 	struct bio_list *bio_list;
1287d89d8796SNeil Brown 
128873c10101SJens Axboe #ifdef CONFIG_BLOCK
128973c10101SJens Axboe /* stack plugging */
129073c10101SJens Axboe 	struct blk_plug *plug;
129173c10101SJens Axboe #endif
129273c10101SJens Axboe 
12931da177e4SLinus Torvalds /* VM state */
12941da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state;
12951da177e4SLinus Torvalds 
12961da177e4SLinus Torvalds 	struct backing_dev_info *backing_dev_info;
12971da177e4SLinus Torvalds 
12981da177e4SLinus Torvalds 	struct io_context *io_context;
12991da177e4SLinus Torvalds 
13001da177e4SLinus Torvalds 	unsigned long ptrace_message;
13011da177e4SLinus Torvalds 	siginfo_t *last_siginfo; /* For ptrace use.  */
13027c3ab738SAndrew Morton 	struct task_io_accounting ioac;
13038f0ab514SJay Lan #if defined(CONFIG_TASK_XACCT)
13041da177e4SLinus Torvalds 	u64 acct_rss_mem1;	/* accumulated rss usage */
13051da177e4SLinus Torvalds 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
130649b5cf34SJonathan Lim 	cputime_t acct_timexpd;	/* stime + utime since last update */
13071da177e4SLinus Torvalds #endif
13081da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
130958568d2aSMiao Xie 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1310cc9a6c87SMel Gorman 	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
1311825a46afSPaul Jackson 	int cpuset_mem_spread_rotor;
13126adef3ebSJack Steiner 	int cpuset_slab_spread_rotor;
13131da177e4SLinus Torvalds #endif
1314ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
1315817929ecSPaul Menage 	/* Control Group info protected by css_set_lock */
13162c392b8cSArnd Bergmann 	struct css_set __rcu *cgroups;
1317817929ecSPaul Menage 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1318817929ecSPaul Menage 	struct list_head cg_list;
1319ddbcc7e8SPaul Menage #endif
132042b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
13210771dfefSIngo Molnar 	struct robust_list_head __user *robust_list;
132234f192c6SIngo Molnar #ifdef CONFIG_COMPAT
132334f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
132434f192c6SIngo Molnar #endif
1325c87e2837SIngo Molnar 	struct list_head pi_state_list;
1326c87e2837SIngo Molnar 	struct futex_pi_state *pi_state_cache;
132742b2dd0aSAlexey Dobriyan #endif
1328cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
13298dc85d54SPeter Zijlstra 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1330cdd6c482SIngo Molnar 	struct mutex perf_event_mutex;
1331cdd6c482SIngo Molnar 	struct list_head perf_event_list;
1332a63eaf34SPaul Mackerras #endif
1333c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
133458568d2aSMiao Xie 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1335c7aceabaSRichard Kennedy 	short il_next;
1336207205a2SEric Dumazet 	short pref_node_fork;
1337c7aceabaSRichard Kennedy #endif
1338cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
1339cbee9f88SPeter Zijlstra 	int numa_scan_seq;
1340cbee9f88SPeter Zijlstra 	int numa_migrate_seq;
1341cbee9f88SPeter Zijlstra 	unsigned int numa_scan_period;
1342598f0ec0SMel Gorman 	unsigned int numa_scan_period_max;
1343cbee9f88SPeter Zijlstra 	u64 node_stamp;			/* migration stamp  */
1344cbee9f88SPeter Zijlstra 	struct callback_head numa_work;
1345*f809ca9aSMel Gorman 
1346*f809ca9aSMel Gorman 	unsigned long *numa_faults;
1347cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
1348cbee9f88SPeter Zijlstra 
1349e56d0903SIngo Molnar 	struct rcu_head rcu;
1350b92ce558SJens Axboe 
1351b92ce558SJens Axboe 	/*
1352b92ce558SJens Axboe 	 * cache last used pipe for splice
1353b92ce558SJens Axboe 	 */
1354b92ce558SJens Axboe 	struct pipe_inode_info *splice_pipe;
13555640f768SEric Dumazet 
13565640f768SEric Dumazet 	struct page_frag task_frag;
13575640f768SEric Dumazet 
1358ca74e92bSShailabh Nagar #ifdef	CONFIG_TASK_DELAY_ACCT
1359ca74e92bSShailabh Nagar 	struct task_delay_info *delays;
1360ca74e92bSShailabh Nagar #endif
1361f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1362f4f154fdSAkinobu Mita 	int make_it_fail;
1363f4f154fdSAkinobu Mita #endif
13649d823e8fSWu Fengguang 	/*
13659d823e8fSWu Fengguang 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
13669d823e8fSWu Fengguang 	 * balance_dirty_pages() for some dirty throttling pause
13679d823e8fSWu Fengguang 	 */
13689d823e8fSWu Fengguang 	int nr_dirtied;
13699d823e8fSWu Fengguang 	int nr_dirtied_pause;
137083712358SWu Fengguang 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
13719d823e8fSWu Fengguang 
13729745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
13739745512cSArjan van de Ven 	int latency_record_count;
13749745512cSArjan van de Ven 	struct latency_record latency_record[LT_SAVECOUNT];
13759745512cSArjan van de Ven #endif
13766976675dSArjan van de Ven 	/*
13776976675dSArjan van de Ven 	 * time slack values; these are used to round up poll() and
13786976675dSArjan van de Ven 	 * select() etc timeout values. These are in nanoseconds.
13796976675dSArjan van de Ven 	 */
13806976675dSArjan van de Ven 	unsigned long timer_slack_ns;
13816976675dSArjan van de Ven 	unsigned long default_timer_slack_ns;
1382f8d570a4SDavid Miller 
1383fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
13843ad2f3fbSDaniel Mack 	/* Index of current stored address in ret_stack */
1385f201ae23SFrederic Weisbecker 	int curr_ret_stack;
1386f201ae23SFrederic Weisbecker 	/* Stack of return addresses for return function tracing */
1387f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack	*ret_stack;
13888aef2d28SSteven Rostedt 	/* time stamp for last schedule */
13898aef2d28SSteven Rostedt 	unsigned long long ftrace_timestamp;
1390f201ae23SFrederic Weisbecker 	/*
1391f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
1392f201ae23SFrederic Weisbecker 	 * because of depth overrun.
1393f201ae23SFrederic Weisbecker 	 */
1394f201ae23SFrederic Weisbecker 	atomic_t trace_overrun;
1395380c4b14SFrederic Weisbecker 	/* Pause for the tracing */
1396380c4b14SFrederic Weisbecker 	atomic_t tracing_graph_pause;
1397f201ae23SFrederic Weisbecker #endif
1398ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1399ea4e2bc4SSteven Rostedt 	/* state flags for use by tracers */
1400ea4e2bc4SSteven Rostedt 	unsigned long trace;
1401b1cff0adSSteven Rostedt 	/* bitmask and counter of trace recursion */
1402261842b7SSteven Rostedt 	unsigned long trace_recursion;
1403261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
1404c255a458SAndrew Morton #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
1405569b846dSKAMEZAWA Hiroyuki 	struct memcg_batch_info {
1406569b846dSKAMEZAWA Hiroyuki 		int do_batch;	/* incremented when batch uncharge started */
1407569b846dSKAMEZAWA Hiroyuki 		struct mem_cgroup *memcg; /* target memcg of uncharge */
14087ffd4ca7SJohannes Weiner 		unsigned long nr_pages;	/* uncharged usage */
14097ffd4ca7SJohannes Weiner 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1410569b846dSKAMEZAWA Hiroyuki 	} memcg_batch;
14110e9d92f2SGlauber Costa 	unsigned int memcg_kmem_skip_account;
1412519e5247SJohannes Weiner 	struct memcg_oom_info {
1413519e5247SJohannes Weiner 		unsigned int may_oom:1;
14143812c8c8SJohannes Weiner 		unsigned int in_memcg_oom:1;
14153812c8c8SJohannes Weiner 		unsigned int oom_locked:1;
14163812c8c8SJohannes Weiner 		int wakeups;
14173812c8c8SJohannes Weiner 		struct mem_cgroup *wait_on_memcg;
1418519e5247SJohannes Weiner 	} memcg_oom;
1419569b846dSKAMEZAWA Hiroyuki #endif
14200326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
14210326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
14220326f5a9SSrikar Dronamraju #endif
1423cafe5635SKent Overstreet #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1424cafe5635SKent Overstreet 	unsigned int	sequential_io;
1425cafe5635SKent Overstreet 	unsigned int	sequential_io_avg;
1426cafe5635SKent Overstreet #endif
14271da177e4SLinus Torvalds };
14281da177e4SLinus Torvalds 
142976e6eee0SRusty Russell /* Future-safe accessor for struct task_struct's cpus_allowed. */
1430a4636818SRusty Russell #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
143176e6eee0SRusty Russell 
1432cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
1433b8593bfdSMel Gorman extern void task_numa_fault(int node, int pages, bool migrated);
14341a687c2eSMel Gorman extern void set_numabalancing_state(bool enabled);
1435cbee9f88SPeter Zijlstra #else
1436b8593bfdSMel Gorman static inline void task_numa_fault(int node, int pages, bool migrated)
1437cbee9f88SPeter Zijlstra {
1438cbee9f88SPeter Zijlstra }
14391a687c2eSMel Gorman static inline void set_numabalancing_state(bool enabled)
14401a687c2eSMel Gorman {
14411a687c2eSMel Gorman }
1442cbee9f88SPeter Zijlstra #endif
1443cbee9f88SPeter Zijlstra 
1444e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
144522c935f4SEric W. Biederman {
144622c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
144722c935f4SEric W. Biederman }
144822c935f4SEric W. Biederman 
1449e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
145022c935f4SEric W. Biederman {
145122c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
145222c935f4SEric W. Biederman }
145322c935f4SEric W. Biederman 
14546dda81f4SOleg Nesterov /*
14556dda81f4SOleg Nesterov  * Without tasklist or rcu lock it is not safe to dereference
14566dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current,
14576dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
14586dda81f4SOleg Nesterov  */
1459e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
146022c935f4SEric W. Biederman {
146122c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
146222c935f4SEric W. Biederman }
146322c935f4SEric W. Biederman 
1464e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
146522c935f4SEric W. Biederman {
146622c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
146722c935f4SEric W. Biederman }
146822c935f4SEric W. Biederman 
14697af57294SPavel Emelyanov struct pid_namespace;
14707af57294SPavel Emelyanov 
14717af57294SPavel Emelyanov /*
14727af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
14737af57294SPavel Emelyanov  * from various namespaces
14747af57294SPavel Emelyanov  *
14757af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
147644c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
147744c4e1b2SEric W. Biederman  *                     current.
14787af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
14797af57294SPavel Emelyanov  *
14807af57294SPavel Emelyanov  * set_task_vxid()   : assigns a virtual id to a task;
14817af57294SPavel Emelyanov  *
14827af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
14837af57294SPavel Emelyanov  */
148452ee2dfdSOleg Nesterov pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
148552ee2dfdSOleg Nesterov 			struct pid_namespace *ns);
14867af57294SPavel Emelyanov 
1487e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
14887af57294SPavel Emelyanov {
14897af57294SPavel Emelyanov 	return tsk->pid;
14907af57294SPavel Emelyanov }
14917af57294SPavel Emelyanov 
149252ee2dfdSOleg Nesterov static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
149352ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
149452ee2dfdSOleg Nesterov {
149552ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
149652ee2dfdSOleg Nesterov }
14977af57294SPavel Emelyanov 
14987af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
14997af57294SPavel Emelyanov {
150052ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
15017af57294SPavel Emelyanov }
15027af57294SPavel Emelyanov 
15037af57294SPavel Emelyanov 
1504e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
15057af57294SPavel Emelyanov {
15067af57294SPavel Emelyanov 	return tsk->tgid;
15077af57294SPavel Emelyanov }
15087af57294SPavel Emelyanov 
15092f2a3a46SPavel Emelyanov pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
15107af57294SPavel Emelyanov 
15117af57294SPavel Emelyanov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
15127af57294SPavel Emelyanov {
15137af57294SPavel Emelyanov 	return pid_vnr(task_tgid(tsk));
15147af57294SPavel Emelyanov }
15157af57294SPavel Emelyanov 
15167af57294SPavel Emelyanov 
151752ee2dfdSOleg Nesterov static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
151852ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
15197af57294SPavel Emelyanov {
152052ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
15217af57294SPavel Emelyanov }
15227af57294SPavel Emelyanov 
15237af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
15247af57294SPavel Emelyanov {
152552ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
15267af57294SPavel Emelyanov }
15277af57294SPavel Emelyanov 
15287af57294SPavel Emelyanov 
152952ee2dfdSOleg Nesterov static inline pid_t task_session_nr_ns(struct task_struct *tsk,
153052ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
15317af57294SPavel Emelyanov {
153252ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
15337af57294SPavel Emelyanov }
15347af57294SPavel Emelyanov 
15357af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
15367af57294SPavel Emelyanov {
153752ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
15387af57294SPavel Emelyanov }
15397af57294SPavel Emelyanov 
15401b0f7ffdSOleg Nesterov /* obsolete, do not use */
15411b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
15421b0f7ffdSOleg Nesterov {
15431b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
15441b0f7ffdSOleg Nesterov }
15457af57294SPavel Emelyanov 
15461da177e4SLinus Torvalds /**
15471da177e4SLinus Torvalds  * pid_alive - check that a task structure is not stale
15481da177e4SLinus Torvalds  * @p: Task structure to be checked.
15491da177e4SLinus Torvalds  *
15501da177e4SLinus Torvalds  * Test if a process is not yet dead (at most zombie state)
15511da177e4SLinus Torvalds  * If pid_alive fails, then pointers within the task structure
15521da177e4SLinus Torvalds  * can be stale and must not be dereferenced.
1553e69f6186SYacine Belkadi  *
1554e69f6186SYacine Belkadi  * Return: 1 if the process is alive. 0 otherwise.
15551da177e4SLinus Torvalds  */
1556e868171aSAlexey Dobriyan static inline int pid_alive(struct task_struct *p)
15571da177e4SLinus Torvalds {
155892476d7fSEric W. Biederman 	return p->pids[PIDTYPE_PID].pid != NULL;
15591da177e4SLinus Torvalds }
15601da177e4SLinus Torvalds 
1561f400e198SSukadev Bhattiprolu /**
1562b460cbc5SSerge E. Hallyn  * is_global_init - check if a task structure is init
15633260259fSHenne  * @tsk: Task structure to be checked.
15643260259fSHenne  *
15653260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1566e69f6186SYacine Belkadi  *
1567e69f6186SYacine Belkadi  * Return: 1 if the task structure is init. 0 otherwise.
1568f400e198SSukadev Bhattiprolu  */
1569e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1570b461cc03SPavel Emelyanov {
1571b461cc03SPavel Emelyanov 	return tsk->pid == 1;
1572b461cc03SPavel Emelyanov }
1573b460cbc5SSerge E. Hallyn 
15749ec52099SCedric Le Goater extern struct pid *cad_pid;
15759ec52099SCedric Le Goater 
15761da177e4SLinus Torvalds extern void free_task(struct task_struct *tsk);
15771da177e4SLinus Torvalds #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1578e56d0903SIngo Molnar 
1579158d9ebdSAndrew Morton extern void __put_task_struct(struct task_struct *t);
1580e56d0903SIngo Molnar 
1581e56d0903SIngo Molnar static inline void put_task_struct(struct task_struct *t)
1582e56d0903SIngo Molnar {
1583e56d0903SIngo Molnar 	if (atomic_dec_and_test(&t->usage))
15848c7904a0SEric W. Biederman 		__put_task_struct(t);
1585e56d0903SIngo Molnar }
15861da177e4SLinus Torvalds 
15876a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
15886a61671bSFrederic Weisbecker extern void task_cputime(struct task_struct *t,
15896a61671bSFrederic Weisbecker 			 cputime_t *utime, cputime_t *stime);
15906a61671bSFrederic Weisbecker extern void task_cputime_scaled(struct task_struct *t,
15916a61671bSFrederic Weisbecker 				cputime_t *utimescaled, cputime_t *stimescaled);
15926a61671bSFrederic Weisbecker extern cputime_t task_gtime(struct task_struct *t);
15936a61671bSFrederic Weisbecker #else
15946fac4829SFrederic Weisbecker static inline void task_cputime(struct task_struct *t,
15956fac4829SFrederic Weisbecker 				cputime_t *utime, cputime_t *stime)
15966fac4829SFrederic Weisbecker {
15976fac4829SFrederic Weisbecker 	if (utime)
15986fac4829SFrederic Weisbecker 		*utime = t->utime;
15996fac4829SFrederic Weisbecker 	if (stime)
16006fac4829SFrederic Weisbecker 		*stime = t->stime;
16016fac4829SFrederic Weisbecker }
16026fac4829SFrederic Weisbecker 
16036fac4829SFrederic Weisbecker static inline void task_cputime_scaled(struct task_struct *t,
16046fac4829SFrederic Weisbecker 				       cputime_t *utimescaled,
16056fac4829SFrederic Weisbecker 				       cputime_t *stimescaled)
16066fac4829SFrederic Weisbecker {
16076fac4829SFrederic Weisbecker 	if (utimescaled)
16086fac4829SFrederic Weisbecker 		*utimescaled = t->utimescaled;
16096fac4829SFrederic Weisbecker 	if (stimescaled)
16106fac4829SFrederic Weisbecker 		*stimescaled = t->stimescaled;
16116fac4829SFrederic Weisbecker }
16126a61671bSFrederic Weisbecker 
16136a61671bSFrederic Weisbecker static inline cputime_t task_gtime(struct task_struct *t)
16146a61671bSFrederic Weisbecker {
16156a61671bSFrederic Weisbecker 	return t->gtime;
16166a61671bSFrederic Weisbecker }
16176a61671bSFrederic Weisbecker #endif
1618e80d0a1aSFrederic Weisbecker extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1619e80d0a1aSFrederic Weisbecker extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
162049048622SBalbir Singh 
16211da177e4SLinus Torvalds /*
16221da177e4SLinus Torvalds  * Per process flags
16231da177e4SLinus Torvalds  */
16241da177e4SLinus Torvalds #define PF_EXITING	0x00000004	/* getting shut down */
1625778e9a9cSAlexey Kuznetsov #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
162694886b84SLaurent Vivier #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
162721aa9af0STejun Heo #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
16281da177e4SLinus Torvalds #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
16294db96cf0SAndi Kleen #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
16301da177e4SLinus Torvalds #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
16311da177e4SLinus Torvalds #define PF_DUMPCORE	0x00000200	/* dumped core */
16321da177e4SLinus Torvalds #define PF_SIGNALED	0x00000400	/* killed by a signal */
16331da177e4SLinus Torvalds #define PF_MEMALLOC	0x00000800	/* Allocating memory */
163472fa5997SVasiliy Kulikov #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
16351da177e4SLinus Torvalds #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1636774a1221STejun Heo #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
16371da177e4SLinus Torvalds #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
16381da177e4SLinus Torvalds #define PF_FROZEN	0x00010000	/* frozen for system suspend */
16391da177e4SLinus Torvalds #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
16401da177e4SLinus Torvalds #define PF_KSWAPD	0x00040000	/* I am kswapd */
164121caf2fcSMing Lei #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
16421da177e4SLinus Torvalds #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1643246bb0b1SOleg Nesterov #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1644b31dc66aSJens Axboe #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1645b31dc66aSJens Axboe #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1646b31dc66aSJens Axboe #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1647b31dc66aSJens Axboe #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
164814a40ffcSTejun Heo #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
16494db96cf0SAndi Kleen #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1650c61afb18SPaul Jackson #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
165161a87122SThomas Gleixner #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
165258a69cb4STejun Heo #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
16532b44c4dbSColin Cross #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
16541da177e4SLinus Torvalds 
16551da177e4SLinus Torvalds /*
16561da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
16571da177e4SLinus Torvalds  * tasks can access tsk->flags in readonly mode for example
16581da177e4SLinus Torvalds  * with tsk_used_math (like during threaded core dumping).
16591da177e4SLinus Torvalds  * There is however an exception to this rule during ptrace
16601da177e4SLinus Torvalds  * or during fork: the ptracer task is allowed to write to the
16611da177e4SLinus Torvalds  * child->flags of its traced child (same goes for fork, the parent
16621da177e4SLinus Torvalds  * can write to the child->flags), because we're guaranteed the
16631da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
16641da177e4SLinus Torvalds  * at the same time the parent does it.
16651da177e4SLinus Torvalds  */
16661da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
16671da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
16681da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
16691da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
16701da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
16711da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
16721da177e4SLinus Torvalds #define conditional_used_math(condition) \
16731da177e4SLinus Torvalds 	conditional_stopped_child_used_math(condition, current)
16741da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
16751da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
16761da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
16771da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
16781da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
16791da177e4SLinus Torvalds 
168021caf2fcSMing Lei /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
168121caf2fcSMing Lei static inline gfp_t memalloc_noio_flags(gfp_t flags)
168221caf2fcSMing Lei {
168321caf2fcSMing Lei 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
168421caf2fcSMing Lei 		flags &= ~__GFP_IO;
168521caf2fcSMing Lei 	return flags;
168621caf2fcSMing Lei }
168721caf2fcSMing Lei 
168821caf2fcSMing Lei static inline unsigned int memalloc_noio_save(void)
168921caf2fcSMing Lei {
169021caf2fcSMing Lei 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
169121caf2fcSMing Lei 	current->flags |= PF_MEMALLOC_NOIO;
169221caf2fcSMing Lei 	return flags;
169321caf2fcSMing Lei }
169421caf2fcSMing Lei 
169521caf2fcSMing Lei static inline void memalloc_noio_restore(unsigned int flags)
169621caf2fcSMing Lei {
169721caf2fcSMing Lei 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
169821caf2fcSMing Lei }
169921caf2fcSMing Lei 
1700e5c1902eSTejun Heo /*
1701a8f072c1STejun Heo  * task->jobctl flags
1702e5c1902eSTejun Heo  */
1703a8f072c1STejun Heo #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1704e5c1902eSTejun Heo 
1705a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1706a8f072c1STejun Heo #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1707a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
170873ddff2bSTejun Heo #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1709fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1710a8f072c1STejun Heo #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1711544b2c91STejun Heo #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1712a8f072c1STejun Heo 
1713a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1714a8f072c1STejun Heo #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1715a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
171673ddff2bSTejun Heo #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1717fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1718a8f072c1STejun Heo #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1719544b2c91STejun Heo #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1720a8f072c1STejun Heo 
1721fb1d910cSTejun Heo #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
172273ddff2bSTejun Heo #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
17233759a0d9STejun Heo 
17247dd3db54STejun Heo extern bool task_set_jobctl_pending(struct task_struct *task,
17257dd3db54STejun Heo 				    unsigned int mask);
172673ddff2bSTejun Heo extern void task_clear_jobctl_trapping(struct task_struct *task);
17273759a0d9STejun Heo extern void task_clear_jobctl_pending(struct task_struct *task,
17283759a0d9STejun Heo 				      unsigned int mask);
172939efa3efSTejun Heo 
1730a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1731f41d911fSPaul E. McKenney 
1732f41d911fSPaul E. McKenney #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
17331aa03f11SPaul E. McKenney #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1734f41d911fSPaul E. McKenney 
1735f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1736f41d911fSPaul E. McKenney {
1737f41d911fSPaul E. McKenney 	p->rcu_read_lock_nesting = 0;
1738f41d911fSPaul E. McKenney 	p->rcu_read_unlock_special = 0;
1739a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1740dd5d19baSPaul E. McKenney 	p->rcu_blocked_node = NULL;
174124278d14SPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
174224278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
174324278d14SPaul E. McKenney 	p->rcu_boost_mutex = NULL;
174424278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1745f41d911fSPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_node_entry);
1746f41d911fSPaul E. McKenney }
1747f41d911fSPaul E. McKenney 
1748f41d911fSPaul E. McKenney #else
1749f41d911fSPaul E. McKenney 
1750f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1751f41d911fSPaul E. McKenney {
1752f41d911fSPaul E. McKenney }
1753f41d911fSPaul E. McKenney 
1754f41d911fSPaul E. McKenney #endif
1755f41d911fSPaul E. McKenney 
1756907aed48SMel Gorman static inline void tsk_restore_flags(struct task_struct *task,
1757907aed48SMel Gorman 				unsigned long orig_flags, unsigned long flags)
1758907aed48SMel Gorman {
1759907aed48SMel Gorman 	task->flags &= ~flags;
1760907aed48SMel Gorman 	task->flags |= orig_flags & flags;
1761907aed48SMel Gorman }
1762907aed48SMel Gorman 
17631da177e4SLinus Torvalds #ifdef CONFIG_SMP
17641e1b6c51SKOSAKI Motohiro extern void do_set_cpus_allowed(struct task_struct *p,
17651e1b6c51SKOSAKI Motohiro 			       const struct cpumask *new_mask);
17661e1b6c51SKOSAKI Motohiro 
1767cd8ba7cdSMike Travis extern int set_cpus_allowed_ptr(struct task_struct *p,
176896f874e2SRusty Russell 				const struct cpumask *new_mask);
17691da177e4SLinus Torvalds #else
17701e1b6c51SKOSAKI Motohiro static inline void do_set_cpus_allowed(struct task_struct *p,
17711e1b6c51SKOSAKI Motohiro 				      const struct cpumask *new_mask)
17721e1b6c51SKOSAKI Motohiro {
17731e1b6c51SKOSAKI Motohiro }
1774cd8ba7cdSMike Travis static inline int set_cpus_allowed_ptr(struct task_struct *p,
177596f874e2SRusty Russell 				       const struct cpumask *new_mask)
17761da177e4SLinus Torvalds {
177796f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
17781da177e4SLinus Torvalds 		return -EINVAL;
17791da177e4SLinus Torvalds 	return 0;
17801da177e4SLinus Torvalds }
17811da177e4SLinus Torvalds #endif
1782e0ad9556SRusty Russell 
17833451d024SFrederic Weisbecker #ifdef CONFIG_NO_HZ_COMMON
17845167e8d5SPeter Zijlstra void calc_load_enter_idle(void);
17855167e8d5SPeter Zijlstra void calc_load_exit_idle(void);
17865167e8d5SPeter Zijlstra #else
17875167e8d5SPeter Zijlstra static inline void calc_load_enter_idle(void) { }
17885167e8d5SPeter Zijlstra static inline void calc_load_exit_idle(void) { }
17893451d024SFrederic Weisbecker #endif /* CONFIG_NO_HZ_COMMON */
17905167e8d5SPeter Zijlstra 
1791e0ad9556SRusty Russell #ifndef CONFIG_CPUMASK_OFFSTACK
1792cd8ba7cdSMike Travis static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1793cd8ba7cdSMike Travis {
1794cd8ba7cdSMike Travis 	return set_cpus_allowed_ptr(p, &new_mask);
1795cd8ba7cdSMike Travis }
1796e0ad9556SRusty Russell #endif
17971da177e4SLinus Torvalds 
1798b342501cSIngo Molnar /*
1799c676329aSPeter Zijlstra  * Do not use outside of architecture code which knows its limitations.
1800c676329aSPeter Zijlstra  *
1801c676329aSPeter Zijlstra  * sched_clock() has no promise of monotonicity or bounded drift between
1802c676329aSPeter Zijlstra  * CPUs, use (which you should not) requires disabling IRQs.
1803c676329aSPeter Zijlstra  *
1804c676329aSPeter Zijlstra  * Please use one of the three interfaces below.
1805b342501cSIngo Molnar  */
18061bbfa6f2SMike Frysinger extern unsigned long long notrace sched_clock(void);
1807c676329aSPeter Zijlstra /*
1808489a71b0SHiroshi Shimamoto  * See the comment in kernel/sched/clock.c
1809c676329aSPeter Zijlstra  */
1810c676329aSPeter Zijlstra extern u64 cpu_clock(int cpu);
1811c676329aSPeter Zijlstra extern u64 local_clock(void);
1812c676329aSPeter Zijlstra extern u64 sched_clock_cpu(int cpu);
1813c676329aSPeter Zijlstra 
1814e436d800SIngo Molnar 
1815c1955a3dSPeter Zijlstra extern void sched_clock_init(void);
1816c1955a3dSPeter Zijlstra 
18173e51f33fSPeter Zijlstra #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
18183e51f33fSPeter Zijlstra static inline void sched_clock_tick(void)
18193e51f33fSPeter Zijlstra {
18203e51f33fSPeter Zijlstra }
18213e51f33fSPeter Zijlstra 
18223e51f33fSPeter Zijlstra static inline void sched_clock_idle_sleep_event(void)
18233e51f33fSPeter Zijlstra {
18243e51f33fSPeter Zijlstra }
18253e51f33fSPeter Zijlstra 
18263e51f33fSPeter Zijlstra static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
18273e51f33fSPeter Zijlstra {
18283e51f33fSPeter Zijlstra }
18293e51f33fSPeter Zijlstra #else
1830c676329aSPeter Zijlstra /*
1831c676329aSPeter Zijlstra  * Architectures can set this to 1 if they have specified
1832c676329aSPeter Zijlstra  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1833c676329aSPeter Zijlstra  * but then during bootup it turns out that sched_clock()
1834c676329aSPeter Zijlstra  * is reliable after all:
1835c676329aSPeter Zijlstra  */
1836c676329aSPeter Zijlstra extern int sched_clock_stable;
1837c676329aSPeter Zijlstra 
18383e51f33fSPeter Zijlstra extern void sched_clock_tick(void);
18393e51f33fSPeter Zijlstra extern void sched_clock_idle_sleep_event(void);
18403e51f33fSPeter Zijlstra extern void sched_clock_idle_wakeup_event(u64 delta_ns);
18413e51f33fSPeter Zijlstra #endif
18423e51f33fSPeter Zijlstra 
1843b52bfee4SVenkatesh Pallipadi #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1844b52bfee4SVenkatesh Pallipadi /*
1845b52bfee4SVenkatesh Pallipadi  * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
1846b52bfee4SVenkatesh Pallipadi  * The reason for this explicit opt-in is not to have perf penalty with
1847b52bfee4SVenkatesh Pallipadi  * slow sched_clocks.
1848b52bfee4SVenkatesh Pallipadi  */
1849b52bfee4SVenkatesh Pallipadi extern void enable_sched_clock_irqtime(void);
1850b52bfee4SVenkatesh Pallipadi extern void disable_sched_clock_irqtime(void);
1851b52bfee4SVenkatesh Pallipadi #else
1852b52bfee4SVenkatesh Pallipadi static inline void enable_sched_clock_irqtime(void) {}
1853b52bfee4SVenkatesh Pallipadi static inline void disable_sched_clock_irqtime(void) {}
1854b52bfee4SVenkatesh Pallipadi #endif
1855b52bfee4SVenkatesh Pallipadi 
185636c8b586SIngo Molnar extern unsigned long long
185741b86e9cSIngo Molnar task_sched_runtime(struct task_struct *task);
18581da177e4SLinus Torvalds 
18591da177e4SLinus Torvalds /* sched_exec is called by processes performing an exec */
18601da177e4SLinus Torvalds #ifdef CONFIG_SMP
18611da177e4SLinus Torvalds extern void sched_exec(void);
18621da177e4SLinus Torvalds #else
18631da177e4SLinus Torvalds #define sched_exec()   {}
18641da177e4SLinus Torvalds #endif
18651da177e4SLinus Torvalds 
18662aa44d05SIngo Molnar extern void sched_clock_idle_sleep_event(void);
18672aa44d05SIngo Molnar extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1868bb29ab26SIngo Molnar 
18691da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
18701da177e4SLinus Torvalds extern void idle_task_exit(void);
18711da177e4SLinus Torvalds #else
18721da177e4SLinus Torvalds static inline void idle_task_exit(void) {}
18731da177e4SLinus Torvalds #endif
18741da177e4SLinus Torvalds 
18753451d024SFrederic Weisbecker #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
18761c20091eSFrederic Weisbecker extern void wake_up_nohz_cpu(int cpu);
187706d8308cSThomas Gleixner #else
18781c20091eSFrederic Weisbecker static inline void wake_up_nohz_cpu(int cpu) { }
187906d8308cSThomas Gleixner #endif
188006d8308cSThomas Gleixner 
1881ce831b38SFrederic Weisbecker #ifdef CONFIG_NO_HZ_FULL
1882ce831b38SFrederic Weisbecker extern bool sched_can_stop_tick(void);
1883265f22a9SFrederic Weisbecker extern u64 scheduler_tick_max_deferment(void);
1884ce831b38SFrederic Weisbecker #else
1885ce831b38SFrederic Weisbecker static inline bool sched_can_stop_tick(void) { return false; }
1886bf0f6f24SIngo Molnar #endif
1887bf0f6f24SIngo Molnar 
18885091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
18895091faa4SMike Galbraith extern void sched_autogroup_create_attach(struct task_struct *p);
18905091faa4SMike Galbraith extern void sched_autogroup_detach(struct task_struct *p);
18915091faa4SMike Galbraith extern void sched_autogroup_fork(struct signal_struct *sig);
18925091faa4SMike Galbraith extern void sched_autogroup_exit(struct signal_struct *sig);
18935091faa4SMike Galbraith #ifdef CONFIG_PROC_FS
18945091faa4SMike Galbraith extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
18952e5b5b3aSHiroshi Shimamoto extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
18965091faa4SMike Galbraith #endif
18975091faa4SMike Galbraith #else
18985091faa4SMike Galbraith static inline void sched_autogroup_create_attach(struct task_struct *p) { }
18995091faa4SMike Galbraith static inline void sched_autogroup_detach(struct task_struct *p) { }
19005091faa4SMike Galbraith static inline void sched_autogroup_fork(struct signal_struct *sig) { }
19015091faa4SMike Galbraith static inline void sched_autogroup_exit(struct signal_struct *sig) { }
19025091faa4SMike Galbraith #endif
19035091faa4SMike Galbraith 
1904d95f4122SMike Galbraith extern bool yield_to(struct task_struct *p, bool preempt);
190536c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
190636c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
190736c8b586SIngo Molnar extern int task_nice(const struct task_struct *p);
190836c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
190936c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
19101da177e4SLinus Torvalds extern int idle_cpu(int cpu);
1911fe7de49fSKOSAKI Motohiro extern int sched_setscheduler(struct task_struct *, int,
1912fe7de49fSKOSAKI Motohiro 			      const struct sched_param *);
1913961ccdddSRusty Russell extern int sched_setscheduler_nocheck(struct task_struct *, int,
1914fe7de49fSKOSAKI Motohiro 				      const struct sched_param *);
191536c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
1916c4f30608SPaul E. McKenney /**
1917c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
1918fa757281SRandy Dunlap  * @p: the task in question.
1919e69f6186SYacine Belkadi  *
1920e69f6186SYacine Belkadi  * Return: 1 if @p is an idle task. 0 otherwise.
1921c4f30608SPaul E. McKenney  */
19227061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
1923c4f30608SPaul E. McKenney {
1924c4f30608SPaul E. McKenney 	return p->pid == 0;
1925c4f30608SPaul E. McKenney }
192636c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
192736c8b586SIngo Molnar extern void set_curr_task(int cpu, struct task_struct *p);
19281da177e4SLinus Torvalds 
19291da177e4SLinus Torvalds void yield(void);
19301da177e4SLinus Torvalds 
19311da177e4SLinus Torvalds /*
19321da177e4SLinus Torvalds  * The default (Linux) execution domain.
19331da177e4SLinus Torvalds  */
19341da177e4SLinus Torvalds extern struct exec_domain	default_exec_domain;
19351da177e4SLinus Torvalds 
19361da177e4SLinus Torvalds union thread_union {
19371da177e4SLinus Torvalds 	struct thread_info thread_info;
19381da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
19391da177e4SLinus Torvalds };
19401da177e4SLinus Torvalds 
19411da177e4SLinus Torvalds #ifndef __HAVE_ARCH_KSTACK_END
19421da177e4SLinus Torvalds static inline int kstack_end(void *addr)
19431da177e4SLinus Torvalds {
19441da177e4SLinus Torvalds 	/* Reliable end of stack detection:
19451da177e4SLinus Torvalds 	 * Some APM bios versions misalign the stack
19461da177e4SLinus Torvalds 	 */
19471da177e4SLinus Torvalds 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
19481da177e4SLinus Torvalds }
19491da177e4SLinus Torvalds #endif
19501da177e4SLinus Torvalds 
19511da177e4SLinus Torvalds extern union thread_union init_thread_union;
19521da177e4SLinus Torvalds extern struct task_struct init_task;
19531da177e4SLinus Torvalds 
19541da177e4SLinus Torvalds extern struct   mm_struct init_mm;
19551da177e4SLinus Torvalds 
1956198fe21bSPavel Emelyanov extern struct pid_namespace init_pid_ns;
1957198fe21bSPavel Emelyanov 
1958198fe21bSPavel Emelyanov /*
1959198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
1960198fe21bSPavel Emelyanov  *
1961198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
1962198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
1963228ebcbeSPavel Emelyanov  * find_task_by_vpid():
1964228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
1965198fe21bSPavel Emelyanov  *
1966e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
1967198fe21bSPavel Emelyanov  */
1968198fe21bSPavel Emelyanov 
1969228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
1970228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1971228ebcbeSPavel Emelyanov 		struct pid_namespace *ns);
1972198fe21bSPavel Emelyanov 
19731da177e4SLinus Torvalds /* per-UID process charging. */
19747b44ab97SEric W. Biederman extern struct user_struct * alloc_uid(kuid_t);
19751da177e4SLinus Torvalds static inline struct user_struct *get_uid(struct user_struct *u)
19761da177e4SLinus Torvalds {
19771da177e4SLinus Torvalds 	atomic_inc(&u->__count);
19781da177e4SLinus Torvalds 	return u;
19791da177e4SLinus Torvalds }
19801da177e4SLinus Torvalds extern void free_uid(struct user_struct *);
19811da177e4SLinus Torvalds 
19821da177e4SLinus Torvalds #include <asm/current.h>
19831da177e4SLinus Torvalds 
1984f0af911aSTorben Hohn extern void xtime_update(unsigned long ticks);
19851da177e4SLinus Torvalds 
1986b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1987b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
19883e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
19891da177e4SLinus Torvalds #ifdef CONFIG_SMP
19901da177e4SLinus Torvalds  extern void kick_process(struct task_struct *tsk);
19911da177e4SLinus Torvalds #else
19921da177e4SLinus Torvalds  static inline void kick_process(struct task_struct *tsk) { }
19931da177e4SLinus Torvalds #endif
19943e51e3edSSamir Bellabes extern void sched_fork(struct task_struct *p);
1995ad46c2c4SIngo Molnar extern void sched_dead(struct task_struct *p);
19961da177e4SLinus Torvalds 
19971da177e4SLinus Torvalds extern void proc_caches_init(void);
19981da177e4SLinus Torvalds extern void flush_signals(struct task_struct *);
19993bcac026SDavid Howells extern void __flush_signals(struct task_struct *);
200010ab825bSOleg Nesterov extern void ignore_signals(struct task_struct *);
20011da177e4SLinus Torvalds extern void flush_signal_handlers(struct task_struct *, int force_default);
20021da177e4SLinus Torvalds extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
20031da177e4SLinus Torvalds 
20041da177e4SLinus Torvalds static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
20051da177e4SLinus Torvalds {
20061da177e4SLinus Torvalds 	unsigned long flags;
20071da177e4SLinus Torvalds 	int ret;
20081da177e4SLinus Torvalds 
20091da177e4SLinus Torvalds 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
20101da177e4SLinus Torvalds 	ret = dequeue_signal(tsk, mask, info);
20111da177e4SLinus Torvalds 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
20121da177e4SLinus Torvalds 
20131da177e4SLinus Torvalds 	return ret;
20141da177e4SLinus Torvalds }
20151da177e4SLinus Torvalds 
20161da177e4SLinus Torvalds extern void block_all_signals(int (*notifier)(void *priv), void *priv,
20171da177e4SLinus Torvalds 			      sigset_t *mask);
20181da177e4SLinus Torvalds extern void unblock_all_signals(void);
20191da177e4SLinus Torvalds extern void release_task(struct task_struct * p);
20201da177e4SLinus Torvalds extern int send_sig_info(int, struct siginfo *, struct task_struct *);
20211da177e4SLinus Torvalds extern int force_sigsegv(int, struct task_struct *);
20221da177e4SLinus Torvalds extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2023c4b92fc1SEric W. Biederman extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2024c4b92fc1SEric W. Biederman extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2025d178bc3aSSerge Hallyn extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2026d178bc3aSSerge Hallyn 				const struct cred *, u32);
2027c4b92fc1SEric W. Biederman extern int kill_pgrp(struct pid *pid, int sig, int priv);
2028c4b92fc1SEric W. Biederman extern int kill_pid(struct pid *pid, int sig, int priv);
2029c3de4b38SMatthew Wilcox extern int kill_proc_info(int, struct siginfo *, pid_t);
203086773473SOleg Nesterov extern __must_check bool do_notify_parent(struct task_struct *, int);
2031a7f0765eSOleg Nesterov extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
20321da177e4SLinus Torvalds extern void force_sig(int, struct task_struct *);
20331da177e4SLinus Torvalds extern int send_sig(int, struct task_struct *, int);
203409faef11SOleg Nesterov extern int zap_other_threads(struct task_struct *p);
20351da177e4SLinus Torvalds extern struct sigqueue *sigqueue_alloc(void);
20361da177e4SLinus Torvalds extern void sigqueue_free(struct sigqueue *);
2037ac5c2153SOleg Nesterov extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
20389ac95f2fSOleg Nesterov extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
20391da177e4SLinus Torvalds 
204051a7b448SAl Viro static inline void restore_saved_sigmask(void)
204151a7b448SAl Viro {
204251a7b448SAl Viro 	if (test_and_clear_restore_sigmask())
204377097ae5SAl Viro 		__set_current_blocked(&current->saved_sigmask);
204451a7b448SAl Viro }
204551a7b448SAl Viro 
2046b7f9a11aSAl Viro static inline sigset_t *sigmask_to_save(void)
2047b7f9a11aSAl Viro {
2048b7f9a11aSAl Viro 	sigset_t *res = &current->blocked;
2049b7f9a11aSAl Viro 	if (unlikely(test_restore_sigmask()))
2050b7f9a11aSAl Viro 		res = &current->saved_sigmask;
2051b7f9a11aSAl Viro 	return res;
2052b7f9a11aSAl Viro }
2053b7f9a11aSAl Viro 
20549ec52099SCedric Le Goater static inline int kill_cad_pid(int sig, int priv)
20559ec52099SCedric Le Goater {
20569ec52099SCedric Le Goater 	return kill_pid(cad_pid, sig, priv);
20579ec52099SCedric Le Goater }
20589ec52099SCedric Le Goater 
20591da177e4SLinus Torvalds /* These can be the second arg to send_sig_info/send_group_sig_info.  */
20601da177e4SLinus Torvalds #define SEND_SIG_NOINFO ((struct siginfo *) 0)
20611da177e4SLinus Torvalds #define SEND_SIG_PRIV	((struct siginfo *) 1)
20621da177e4SLinus Torvalds #define SEND_SIG_FORCED	((struct siginfo *) 2)
20631da177e4SLinus Torvalds 
20642a855dd0SSebastian Andrzej Siewior /*
20652a855dd0SSebastian Andrzej Siewior  * True if we are on the alternate signal stack.
20662a855dd0SSebastian Andrzej Siewior  */
20671da177e4SLinus Torvalds static inline int on_sig_stack(unsigned long sp)
20681da177e4SLinus Torvalds {
20692a855dd0SSebastian Andrzej Siewior #ifdef CONFIG_STACK_GROWSUP
20702a855dd0SSebastian Andrzej Siewior 	return sp >= current->sas_ss_sp &&
20712a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp < current->sas_ss_size;
20722a855dd0SSebastian Andrzej Siewior #else
20732a855dd0SSebastian Andrzej Siewior 	return sp > current->sas_ss_sp &&
20742a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp <= current->sas_ss_size;
20752a855dd0SSebastian Andrzej Siewior #endif
20761da177e4SLinus Torvalds }
20771da177e4SLinus Torvalds 
20781da177e4SLinus Torvalds static inline int sas_ss_flags(unsigned long sp)
20791da177e4SLinus Torvalds {
20801da177e4SLinus Torvalds 	return (current->sas_ss_size == 0 ? SS_DISABLE
20811da177e4SLinus Torvalds 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
20821da177e4SLinus Torvalds }
20831da177e4SLinus Torvalds 
20845a1b98d3SAl Viro static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
20855a1b98d3SAl Viro {
20865a1b98d3SAl Viro 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
20875a1b98d3SAl Viro #ifdef CONFIG_STACK_GROWSUP
20885a1b98d3SAl Viro 		return current->sas_ss_sp;
20895a1b98d3SAl Viro #else
20905a1b98d3SAl Viro 		return current->sas_ss_sp + current->sas_ss_size;
20915a1b98d3SAl Viro #endif
20925a1b98d3SAl Viro 	return sp;
20935a1b98d3SAl Viro }
20945a1b98d3SAl Viro 
20951da177e4SLinus Torvalds /*
20961da177e4SLinus Torvalds  * Routines for handling mm_structs
20971da177e4SLinus Torvalds  */
20981da177e4SLinus Torvalds extern struct mm_struct * mm_alloc(void);
20991da177e4SLinus Torvalds 
21001da177e4SLinus Torvalds /* mmdrop drops the mm and the page tables */
2101b3c97528SHarvey Harrison extern void __mmdrop(struct mm_struct *);
21021da177e4SLinus Torvalds static inline void mmdrop(struct mm_struct * mm)
21031da177e4SLinus Torvalds {
21046fb43d7bSIngo Molnar 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
21051da177e4SLinus Torvalds 		__mmdrop(mm);
21061da177e4SLinus Torvalds }
21071da177e4SLinus Torvalds 
21081da177e4SLinus Torvalds /* mmput gets rid of the mappings and all user-space */
21091da177e4SLinus Torvalds extern void mmput(struct mm_struct *);
21101da177e4SLinus Torvalds /* Grab a reference to a task's mm, if it is not already going away */
21111da177e4SLinus Torvalds extern struct mm_struct *get_task_mm(struct task_struct *task);
21128cdb878dSChristopher Yeoh /*
21138cdb878dSChristopher Yeoh  * Grab a reference to a task's mm, if it is not already going away
21148cdb878dSChristopher Yeoh  * and ptrace_may_access with the mode parameter passed to it
21158cdb878dSChristopher Yeoh  * succeeds.
21168cdb878dSChristopher Yeoh  */
21178cdb878dSChristopher Yeoh extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
21181da177e4SLinus Torvalds /* Remove the current tasks stale references to the old mm_struct */
21191da177e4SLinus Torvalds extern void mm_release(struct task_struct *, struct mm_struct *);
2120402b0862SCarsten Otte /* Allocate a new mm structure and copy contents from tsk->mm */
2121402b0862SCarsten Otte extern struct mm_struct *dup_mm(struct task_struct *tsk);
21221da177e4SLinus Torvalds 
21236f2c55b8SAlexey Dobriyan extern int copy_thread(unsigned long, unsigned long, unsigned long,
2124afa86fc4SAl Viro 			struct task_struct *);
21251da177e4SLinus Torvalds extern void flush_thread(void);
21261da177e4SLinus Torvalds extern void exit_thread(void);
21271da177e4SLinus Torvalds 
21281da177e4SLinus Torvalds extern void exit_files(struct task_struct *);
2129a7e5328aSOleg Nesterov extern void __cleanup_sighand(struct sighand_struct *);
2130cbaffba1SOleg Nesterov 
21311da177e4SLinus Torvalds extern void exit_itimers(struct signal_struct *);
2132cbaffba1SOleg Nesterov extern void flush_itimer_signals(void);
21331da177e4SLinus Torvalds 
21349402c95fSJoe Perches extern void do_group_exit(int);
21351da177e4SLinus Torvalds 
21361da177e4SLinus Torvalds extern int allow_signal(int);
21371da177e4SLinus Torvalds extern int disallow_signal(int);
21381da177e4SLinus Torvalds 
2139d7627467SDavid Howells extern int do_execve(const char *,
2140d7627467SDavid Howells 		     const char __user * const __user *,
2141da3d4c5fSAl Viro 		     const char __user * const __user *);
2142e80d6661SAl Viro extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
214336c8b586SIngo Molnar struct task_struct *fork_idle(int);
21442aa3a7f8SAl Viro extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
21451da177e4SLinus Torvalds 
21461da177e4SLinus Torvalds extern void set_task_comm(struct task_struct *tsk, char *from);
214759714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
21481da177e4SLinus Torvalds 
21491da177e4SLinus Torvalds #ifdef CONFIG_SMP
2150317f3941SPeter Zijlstra void scheduler_ipi(void);
215185ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
21521da177e4SLinus Torvalds #else
2153184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
215485ba2d86SRoland McGrath static inline unsigned long wait_task_inactive(struct task_struct *p,
215585ba2d86SRoland McGrath 					       long match_state)
215685ba2d86SRoland McGrath {
215785ba2d86SRoland McGrath 	return 1;
215885ba2d86SRoland McGrath }
21591da177e4SLinus Torvalds #endif
21601da177e4SLinus Torvalds 
216105725f7eSJiri Pirko #define next_task(p) \
216205725f7eSJiri Pirko 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
21631da177e4SLinus Torvalds 
21641da177e4SLinus Torvalds #define for_each_process(p) \
21651da177e4SLinus Torvalds 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
21661da177e4SLinus Torvalds 
21675bb459bbSOleg Nesterov extern bool current_is_single_threaded(void);
2168d84f4f99SDavid Howells 
21691da177e4SLinus Torvalds /*
21701da177e4SLinus Torvalds  * Careful: do_each_thread/while_each_thread is a double loop so
21711da177e4SLinus Torvalds  *          'break' will not work as expected - use goto instead.
21721da177e4SLinus Torvalds  */
21731da177e4SLinus Torvalds #define do_each_thread(g, t) \
21741da177e4SLinus Torvalds 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
21751da177e4SLinus Torvalds 
21761da177e4SLinus Torvalds #define while_each_thread(g, t) \
21771da177e4SLinus Torvalds 	while ((t = next_thread(t)) != g)
21781da177e4SLinus Torvalds 
21797e49827cSOleg Nesterov static inline int get_nr_threads(struct task_struct *tsk)
21807e49827cSOleg Nesterov {
2181b3ac022cSOleg Nesterov 	return tsk->signal->nr_threads;
21827e49827cSOleg Nesterov }
21837e49827cSOleg Nesterov 
2184087806b1SOleg Nesterov static inline bool thread_group_leader(struct task_struct *p)
2185087806b1SOleg Nesterov {
2186087806b1SOleg Nesterov 	return p->exit_signal >= 0;
2187087806b1SOleg Nesterov }
21881da177e4SLinus Torvalds 
21890804ef4bSEric W. Biederman /* Do to the insanities of de_thread it is possible for a process
21900804ef4bSEric W. Biederman  * to have the pid of the thread group leader without actually being
21910804ef4bSEric W. Biederman  * the thread group leader.  For iteration through the pids in proc
21920804ef4bSEric W. Biederman  * all we care about is that we have a task with the appropriate
21930804ef4bSEric W. Biederman  * pid, we don't actually care if we have the right task.
21940804ef4bSEric W. Biederman  */
2195e1403b8eSOleg Nesterov static inline bool has_group_leader_pid(struct task_struct *p)
21960804ef4bSEric W. Biederman {
2197e1403b8eSOleg Nesterov 	return task_pid(p) == p->signal->leader_pid;
21980804ef4bSEric W. Biederman }
21990804ef4bSEric W. Biederman 
2200bac0abd6SPavel Emelyanov static inline
2201e1403b8eSOleg Nesterov bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2202bac0abd6SPavel Emelyanov {
2203e1403b8eSOleg Nesterov 	return p1->signal == p2->signal;
2204bac0abd6SPavel Emelyanov }
2205bac0abd6SPavel Emelyanov 
220636c8b586SIngo Molnar static inline struct task_struct *next_thread(const struct task_struct *p)
220747e65328SOleg Nesterov {
220805725f7eSJiri Pirko 	return list_entry_rcu(p->thread_group.next,
220936c8b586SIngo Molnar 			      struct task_struct, thread_group);
221047e65328SOleg Nesterov }
221147e65328SOleg Nesterov 
2212e868171aSAlexey Dobriyan static inline int thread_group_empty(struct task_struct *p)
22131da177e4SLinus Torvalds {
221447e65328SOleg Nesterov 	return list_empty(&p->thread_group);
22151da177e4SLinus Torvalds }
22161da177e4SLinus Torvalds 
22171da177e4SLinus Torvalds #define delay_group_leader(p) \
22181da177e4SLinus Torvalds 		(thread_group_leader(p) && !thread_group_empty(p))
22191da177e4SLinus Torvalds 
22201da177e4SLinus Torvalds /*
2221260ea101SEric W. Biederman  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
222222e2c507SJens Axboe  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2223ddbcc7e8SPaul Menage  * pins the final release of task.io_context.  Also protects ->cpuset and
2224d68b46feSOleg Nesterov  * ->cgroup.subsys[]. And ->vfork_done.
22251da177e4SLinus Torvalds  *
22261da177e4SLinus Torvalds  * Nests both inside and outside of read_lock(&tasklist_lock).
22271da177e4SLinus Torvalds  * It must not be nested with write_lock_irq(&tasklist_lock),
22281da177e4SLinus Torvalds  * neither inside nor outside.
22291da177e4SLinus Torvalds  */
22301da177e4SLinus Torvalds static inline void task_lock(struct task_struct *p)
22311da177e4SLinus Torvalds {
22321da177e4SLinus Torvalds 	spin_lock(&p->alloc_lock);
22331da177e4SLinus Torvalds }
22341da177e4SLinus Torvalds 
22351da177e4SLinus Torvalds static inline void task_unlock(struct task_struct *p)
22361da177e4SLinus Torvalds {
22371da177e4SLinus Torvalds 	spin_unlock(&p->alloc_lock);
22381da177e4SLinus Torvalds }
22391da177e4SLinus Torvalds 
2240b8ed374eSNamhyung Kim extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2241f63ee72eSOleg Nesterov 							unsigned long *flags);
2242f63ee72eSOleg Nesterov 
22439388dc30SAnton Vorontsov static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
22449388dc30SAnton Vorontsov 						       unsigned long *flags)
22459388dc30SAnton Vorontsov {
22469388dc30SAnton Vorontsov 	struct sighand_struct *ret;
22479388dc30SAnton Vorontsov 
22489388dc30SAnton Vorontsov 	ret = __lock_task_sighand(tsk, flags);
22499388dc30SAnton Vorontsov 	(void)__cond_lock(&tsk->sighand->siglock, ret);
22509388dc30SAnton Vorontsov 	return ret;
22519388dc30SAnton Vorontsov }
2252b8ed374eSNamhyung Kim 
2253f63ee72eSOleg Nesterov static inline void unlock_task_sighand(struct task_struct *tsk,
2254f63ee72eSOleg Nesterov 						unsigned long *flags)
2255f63ee72eSOleg Nesterov {
2256f63ee72eSOleg Nesterov 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2257f63ee72eSOleg Nesterov }
2258f63ee72eSOleg Nesterov 
22594714d1d3SBen Blum #ifdef CONFIG_CGROUPS
2260257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk)
22614714d1d3SBen Blum {
2262257058aeSTejun Heo 	down_read(&tsk->signal->group_rwsem);
22634714d1d3SBen Blum }
2264257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk)
22654714d1d3SBen Blum {
2266257058aeSTejun Heo 	up_read(&tsk->signal->group_rwsem);
22674714d1d3SBen Blum }
226877e4ef99STejun Heo 
226977e4ef99STejun Heo /**
227077e4ef99STejun Heo  * threadgroup_lock - lock threadgroup
227177e4ef99STejun Heo  * @tsk: member task of the threadgroup to lock
227277e4ef99STejun Heo  *
227377e4ef99STejun Heo  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
227477e4ef99STejun Heo  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2275e56fb287SOleg Nesterov  * change ->group_leader/pid.  This is useful for cases where the threadgroup
2276e56fb287SOleg Nesterov  * needs to stay stable across blockable operations.
227777e4ef99STejun Heo  *
227877e4ef99STejun Heo  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
227977e4ef99STejun Heo  * synchronization.  While held, no new task will be added to threadgroup
228077e4ef99STejun Heo  * and no existing live task will have its PF_EXITING set.
228177e4ef99STejun Heo  *
2282e56fb287SOleg Nesterov  * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2283e56fb287SOleg Nesterov  * sub-thread becomes a new leader.
228477e4ef99STejun Heo  */
2285257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk)
22864714d1d3SBen Blum {
2287257058aeSTejun Heo 	down_write(&tsk->signal->group_rwsem);
22884714d1d3SBen Blum }
228977e4ef99STejun Heo 
229077e4ef99STejun Heo /**
229177e4ef99STejun Heo  * threadgroup_unlock - unlock threadgroup
229277e4ef99STejun Heo  * @tsk: member task of the threadgroup to unlock
229377e4ef99STejun Heo  *
229477e4ef99STejun Heo  * Reverse threadgroup_lock().
229577e4ef99STejun Heo  */
2296257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk)
22974714d1d3SBen Blum {
2298257058aeSTejun Heo 	up_write(&tsk->signal->group_rwsem);
22994714d1d3SBen Blum }
23004714d1d3SBen Blum #else
2301257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2302257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk) {}
2303257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk) {}
2304257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk) {}
23054714d1d3SBen Blum #endif
23064714d1d3SBen Blum 
2307f037360fSAl Viro #ifndef __HAVE_THREAD_FUNCTIONS
2308f037360fSAl Viro 
2309f7e4217bSRoman Zippel #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2310f7e4217bSRoman Zippel #define task_stack_page(task)	((task)->stack)
2311a1261f54SAl Viro 
231210ebffdeSAl Viro static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
231310ebffdeSAl Viro {
231410ebffdeSAl Viro 	*task_thread_info(p) = *task_thread_info(org);
231510ebffdeSAl Viro 	task_thread_info(p)->task = p;
231610ebffdeSAl Viro }
231710ebffdeSAl Viro 
231810ebffdeSAl Viro static inline unsigned long *end_of_stack(struct task_struct *p)
231910ebffdeSAl Viro {
2320f7e4217bSRoman Zippel 	return (unsigned long *)(task_thread_info(p) + 1);
232110ebffdeSAl Viro }
232210ebffdeSAl Viro 
2323f037360fSAl Viro #endif
2324f037360fSAl Viro 
23258b05c7e6SFUJITA Tomonori static inline int object_is_on_stack(void *obj)
23268b05c7e6SFUJITA Tomonori {
23278b05c7e6SFUJITA Tomonori 	void *stack = task_stack_page(current);
23288b05c7e6SFUJITA Tomonori 
23298b05c7e6SFUJITA Tomonori 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
23308b05c7e6SFUJITA Tomonori }
23318b05c7e6SFUJITA Tomonori 
23328c9843e5SBenjamin Herrenschmidt extern void thread_info_cache_init(void);
23338c9843e5SBenjamin Herrenschmidt 
23347c9f8861SEric Sandeen #ifdef CONFIG_DEBUG_STACK_USAGE
23357c9f8861SEric Sandeen static inline unsigned long stack_not_used(struct task_struct *p)
23367c9f8861SEric Sandeen {
23377c9f8861SEric Sandeen 	unsigned long *n = end_of_stack(p);
23387c9f8861SEric Sandeen 
23397c9f8861SEric Sandeen 	do { 	/* Skip over canary */
23407c9f8861SEric Sandeen 		n++;
23417c9f8861SEric Sandeen 	} while (!*n);
23427c9f8861SEric Sandeen 
23437c9f8861SEric Sandeen 	return (unsigned long)n - (unsigned long)end_of_stack(p);
23447c9f8861SEric Sandeen }
23457c9f8861SEric Sandeen #endif
23467c9f8861SEric Sandeen 
23471da177e4SLinus Torvalds /* set thread flags in other task's structures
23481da177e4SLinus Torvalds  * - see asm/thread_info.h for TIF_xxxx flags available
23491da177e4SLinus Torvalds  */
23501da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
23511da177e4SLinus Torvalds {
2352a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
23531da177e4SLinus Torvalds }
23541da177e4SLinus Torvalds 
23551da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
23561da177e4SLinus Torvalds {
2357a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
23581da177e4SLinus Torvalds }
23591da177e4SLinus Torvalds 
23601da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
23611da177e4SLinus Torvalds {
2362a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
23631da177e4SLinus Torvalds }
23641da177e4SLinus Torvalds 
23651da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
23661da177e4SLinus Torvalds {
2367a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
23681da177e4SLinus Torvalds }
23691da177e4SLinus Torvalds 
23701da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
23711da177e4SLinus Torvalds {
2372a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
23731da177e4SLinus Torvalds }
23741da177e4SLinus Torvalds 
23751da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
23761da177e4SLinus Torvalds {
23771da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
23781da177e4SLinus Torvalds }
23791da177e4SLinus Torvalds 
23801da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
23811da177e4SLinus Torvalds {
23821da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
23831da177e4SLinus Torvalds }
23841da177e4SLinus Torvalds 
23858ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
23868ae121acSGregory Haskins {
23878ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
23888ae121acSGregory Haskins }
23898ae121acSGregory Haskins 
2390690cc3ffSEric W. Biederman static inline int restart_syscall(void)
2391690cc3ffSEric W. Biederman {
2392690cc3ffSEric W. Biederman 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2393690cc3ffSEric W. Biederman 	return -ERESTARTNOINTR;
2394690cc3ffSEric W. Biederman }
2395690cc3ffSEric W. Biederman 
23961da177e4SLinus Torvalds static inline int signal_pending(struct task_struct *p)
23971da177e4SLinus Torvalds {
23981da177e4SLinus Torvalds 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
23991da177e4SLinus Torvalds }
24001da177e4SLinus Torvalds 
2401d9588725SRoland McGrath static inline int __fatal_signal_pending(struct task_struct *p)
2402d9588725SRoland McGrath {
2403d9588725SRoland McGrath 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2404d9588725SRoland McGrath }
2405f776d12dSMatthew Wilcox 
2406f776d12dSMatthew Wilcox static inline int fatal_signal_pending(struct task_struct *p)
2407f776d12dSMatthew Wilcox {
2408f776d12dSMatthew Wilcox 	return signal_pending(p) && __fatal_signal_pending(p);
2409f776d12dSMatthew Wilcox }
2410f776d12dSMatthew Wilcox 
241116882c1eSOleg Nesterov static inline int signal_pending_state(long state, struct task_struct *p)
241216882c1eSOleg Nesterov {
241316882c1eSOleg Nesterov 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
241416882c1eSOleg Nesterov 		return 0;
241516882c1eSOleg Nesterov 	if (!signal_pending(p))
241616882c1eSOleg Nesterov 		return 0;
241716882c1eSOleg Nesterov 
241816882c1eSOleg Nesterov 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
241916882c1eSOleg Nesterov }
242016882c1eSOleg Nesterov 
24211da177e4SLinus Torvalds /*
24221da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
24231da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
24241da177e4SLinus Torvalds  * value indicates whether a reschedule was done in fact.
24251da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling,
24261da177e4SLinus Torvalds  * cond_resched_softirq() will enable bhs before scheduling.
24271da177e4SLinus Torvalds  */
2428c3921ab7SLinus Torvalds extern int _cond_resched(void);
24296f80bd98SFrederic Weisbecker 
2430613afbf8SFrederic Weisbecker #define cond_resched() ({			\
2431613afbf8SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, 0);	\
2432613afbf8SFrederic Weisbecker 	_cond_resched();			\
2433613afbf8SFrederic Weisbecker })
24346f80bd98SFrederic Weisbecker 
2435613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
2436613afbf8SFrederic Weisbecker 
2437bdd4e85dSFrederic Weisbecker #ifdef CONFIG_PREEMPT_COUNT
2438716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
243902b67cc3SHerbert Xu #else
2440716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	0
244102b67cc3SHerbert Xu #endif
2442716a4234SFrederic Weisbecker 
2443613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
2444716a4234SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2445613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
2446613afbf8SFrederic Weisbecker })
2447613afbf8SFrederic Weisbecker 
2448613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
2449613afbf8SFrederic Weisbecker 
2450613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
245175e1056fSVenkatesh Pallipadi 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2452613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
2453613afbf8SFrederic Weisbecker })
24541da177e4SLinus Torvalds 
2455f6f3c437SSimon Horman static inline void cond_resched_rcu(void)
2456f6f3c437SSimon Horman {
2457f6f3c437SSimon Horman #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2458f6f3c437SSimon Horman 	rcu_read_unlock();
2459f6f3c437SSimon Horman 	cond_resched();
2460f6f3c437SSimon Horman 	rcu_read_lock();
2461f6f3c437SSimon Horman #endif
2462f6f3c437SSimon Horman }
2463f6f3c437SSimon Horman 
24641da177e4SLinus Torvalds /*
24651da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
246695c354feSNick Piggin  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
246795c354feSNick Piggin  * but a general need for low latency)
24681da177e4SLinus Torvalds  */
246995c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
24701da177e4SLinus Torvalds {
247195c354feSNick Piggin #ifdef CONFIG_PREEMPT
247295c354feSNick Piggin 	return spin_is_contended(lock);
247395c354feSNick Piggin #else
24741da177e4SLinus Torvalds 	return 0;
247595c354feSNick Piggin #endif
24761da177e4SLinus Torvalds }
24771da177e4SLinus Torvalds 
24787bb44adeSRoland McGrath /*
2479ee761f62SThomas Gleixner  * Idle thread specific functions to determine the need_resched
2480ee761f62SThomas Gleixner  * polling state. We have two versions, one based on TS_POLLING in
2481ee761f62SThomas Gleixner  * thread_info.status and one based on TIF_POLLING_NRFLAG in
2482ee761f62SThomas Gleixner  * thread_info.flags
2483ee761f62SThomas Gleixner  */
2484ee761f62SThomas Gleixner #ifdef TS_POLLING
2485ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p)
2486ee761f62SThomas Gleixner {
2487ee761f62SThomas Gleixner 	return task_thread_info(p)->status & TS_POLLING;
2488ee761f62SThomas Gleixner }
2489ea811747SPeter Zijlstra static inline void __current_set_polling(void)
24903a98f871SThomas Gleixner {
24913a98f871SThomas Gleixner 	current_thread_info()->status |= TS_POLLING;
24923a98f871SThomas Gleixner }
24933a98f871SThomas Gleixner 
2494ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2495ea811747SPeter Zijlstra {
2496ea811747SPeter Zijlstra 	__current_set_polling();
2497ea811747SPeter Zijlstra 
2498ea811747SPeter Zijlstra 	/*
2499ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2500ea811747SPeter Zijlstra 	 * paired by resched_task()
2501ea811747SPeter Zijlstra 	 */
2502ea811747SPeter Zijlstra 	smp_mb();
2503ea811747SPeter Zijlstra 
2504ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2505ea811747SPeter Zijlstra }
2506ea811747SPeter Zijlstra 
2507ea811747SPeter Zijlstra static inline void __current_clr_polling(void)
25083a98f871SThomas Gleixner {
25093a98f871SThomas Gleixner 	current_thread_info()->status &= ~TS_POLLING;
2510ea811747SPeter Zijlstra }
2511ea811747SPeter Zijlstra 
2512ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2513ea811747SPeter Zijlstra {
2514ea811747SPeter Zijlstra 	__current_clr_polling();
2515ea811747SPeter Zijlstra 
2516ea811747SPeter Zijlstra 	/*
2517ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2518ea811747SPeter Zijlstra 	 * paired by resched_task()
2519ea811747SPeter Zijlstra 	 */
2520ea811747SPeter Zijlstra 	smp_mb();
2521ea811747SPeter Zijlstra 
2522ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
25233a98f871SThomas Gleixner }
2524ee761f62SThomas Gleixner #elif defined(TIF_POLLING_NRFLAG)
2525ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p)
2526ee761f62SThomas Gleixner {
2527ee761f62SThomas Gleixner 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2528ee761f62SThomas Gleixner }
2529ea811747SPeter Zijlstra 
2530ea811747SPeter Zijlstra static inline void __current_set_polling(void)
25313a98f871SThomas Gleixner {
25323a98f871SThomas Gleixner 	set_thread_flag(TIF_POLLING_NRFLAG);
25333a98f871SThomas Gleixner }
25343a98f871SThomas Gleixner 
2535ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2536ea811747SPeter Zijlstra {
2537ea811747SPeter Zijlstra 	__current_set_polling();
2538ea811747SPeter Zijlstra 
2539ea811747SPeter Zijlstra 	/*
2540ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2541ea811747SPeter Zijlstra 	 * paired by resched_task()
2542ea811747SPeter Zijlstra 	 *
2543ea811747SPeter Zijlstra 	 * XXX: assumes set/clear bit are identical barrier wise.
2544ea811747SPeter Zijlstra 	 */
2545ea811747SPeter Zijlstra 	smp_mb__after_clear_bit();
2546ea811747SPeter Zijlstra 
2547ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2548ea811747SPeter Zijlstra }
2549ea811747SPeter Zijlstra 
2550ea811747SPeter Zijlstra static inline void __current_clr_polling(void)
25513a98f871SThomas Gleixner {
25523a98f871SThomas Gleixner 	clear_thread_flag(TIF_POLLING_NRFLAG);
25533a98f871SThomas Gleixner }
2554ea811747SPeter Zijlstra 
2555ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2556ea811747SPeter Zijlstra {
2557ea811747SPeter Zijlstra 	__current_clr_polling();
2558ea811747SPeter Zijlstra 
2559ea811747SPeter Zijlstra 	/*
2560ea811747SPeter Zijlstra 	 * Polling state must be visible before we test NEED_RESCHED,
2561ea811747SPeter Zijlstra 	 * paired by resched_task()
2562ea811747SPeter Zijlstra 	 */
2563ea811747SPeter Zijlstra 	smp_mb__after_clear_bit();
2564ea811747SPeter Zijlstra 
2565ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2566ea811747SPeter Zijlstra }
2567ea811747SPeter Zijlstra 
2568ee761f62SThomas Gleixner #else
2569ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2570ea811747SPeter Zijlstra static inline void __current_set_polling(void) { }
2571ea811747SPeter Zijlstra static inline void __current_clr_polling(void) { }
2572ea811747SPeter Zijlstra 
2573ea811747SPeter Zijlstra static inline bool __must_check current_set_polling_and_test(void)
2574ea811747SPeter Zijlstra {
2575ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2576ea811747SPeter Zijlstra }
2577ea811747SPeter Zijlstra static inline bool __must_check current_clr_polling_and_test(void)
2578ea811747SPeter Zijlstra {
2579ea811747SPeter Zijlstra 	return unlikely(tif_need_resched());
2580ea811747SPeter Zijlstra }
2581ee761f62SThomas Gleixner #endif
2582ee761f62SThomas Gleixner 
258375f93fedSPeter Zijlstra static __always_inline bool need_resched(void)
258475f93fedSPeter Zijlstra {
258575f93fedSPeter Zijlstra 	return unlikely(tif_need_resched());
258675f93fedSPeter Zijlstra }
258775f93fedSPeter Zijlstra 
2588ee761f62SThomas Gleixner /*
2589f06febc9SFrank Mayhar  * Thread group CPU time accounting.
2590f06febc9SFrank Mayhar  */
25914cd4c1b4SPeter Zijlstra void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
25924da94d49SPeter Zijlstra void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2593f06febc9SFrank Mayhar 
2594f06febc9SFrank Mayhar static inline void thread_group_cputime_init(struct signal_struct *sig)
2595f06febc9SFrank Mayhar {
2596ee30a7b2SThomas Gleixner 	raw_spin_lock_init(&sig->cputimer.lock);
2597f06febc9SFrank Mayhar }
2598f06febc9SFrank Mayhar 
2599f06febc9SFrank Mayhar /*
26007bb44adeSRoland McGrath  * Reevaluate whether the task has signals pending delivery.
26017bb44adeSRoland McGrath  * Wake the task if so.
26027bb44adeSRoland McGrath  * This is required every time the blocked sigset_t changes.
26037bb44adeSRoland McGrath  * callers must hold sighand->siglock.
26047bb44adeSRoland McGrath  */
26057bb44adeSRoland McGrath extern void recalc_sigpending_and_wake(struct task_struct *t);
26061da177e4SLinus Torvalds extern void recalc_sigpending(void);
26071da177e4SLinus Torvalds 
2608910ffdb1SOleg Nesterov extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2609910ffdb1SOleg Nesterov 
2610910ffdb1SOleg Nesterov static inline void signal_wake_up(struct task_struct *t, bool resume)
2611910ffdb1SOleg Nesterov {
2612910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2613910ffdb1SOleg Nesterov }
2614910ffdb1SOleg Nesterov static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2615910ffdb1SOleg Nesterov {
2616910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2617910ffdb1SOleg Nesterov }
26181da177e4SLinus Torvalds 
26191da177e4SLinus Torvalds /*
26201da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
26211da177e4SLinus Torvalds  */
26221da177e4SLinus Torvalds #ifdef CONFIG_SMP
26231da177e4SLinus Torvalds 
26241da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
26251da177e4SLinus Torvalds {
2626a1261f54SAl Viro 	return task_thread_info(p)->cpu;
26271da177e4SLinus Torvalds }
26281da177e4SLinus Torvalds 
2629c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
26301da177e4SLinus Torvalds 
26311da177e4SLinus Torvalds #else
26321da177e4SLinus Torvalds 
26331da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
26341da177e4SLinus Torvalds {
26351da177e4SLinus Torvalds 	return 0;
26361da177e4SLinus Torvalds }
26371da177e4SLinus Torvalds 
26381da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
26391da177e4SLinus Torvalds {
26401da177e4SLinus Torvalds }
26411da177e4SLinus Torvalds 
26421da177e4SLinus Torvalds #endif /* CONFIG_SMP */
26431da177e4SLinus Torvalds 
264496f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
264596f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
26465c45bf27SSiddha, Suresh B 
26477c941438SDhaval Giani #ifdef CONFIG_CGROUP_SCHED
264807e06b01SYong Zhang extern struct task_group root_task_group;
26498323f26cSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */
26509b5b7751SSrivatsa Vaddagiri 
265154e99124SDhaval Giani extern int task_can_switch_user(struct user_struct *up,
265254e99124SDhaval Giani 					struct task_struct *tsk);
265354e99124SDhaval Giani 
26544b98d11bSAlexey Dobriyan #ifdef CONFIG_TASK_XACCT
26554b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
26564b98d11bSAlexey Dobriyan {
2657940389b8SAndrea Righi 	tsk->ioac.rchar += amt;
26584b98d11bSAlexey Dobriyan }
26594b98d11bSAlexey Dobriyan 
26604b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
26614b98d11bSAlexey Dobriyan {
2662940389b8SAndrea Righi 	tsk->ioac.wchar += amt;
26634b98d11bSAlexey Dobriyan }
26644b98d11bSAlexey Dobriyan 
26654b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
26664b98d11bSAlexey Dobriyan {
2667940389b8SAndrea Righi 	tsk->ioac.syscr++;
26684b98d11bSAlexey Dobriyan }
26694b98d11bSAlexey Dobriyan 
26704b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
26714b98d11bSAlexey Dobriyan {
2672940389b8SAndrea Righi 	tsk->ioac.syscw++;
26734b98d11bSAlexey Dobriyan }
26744b98d11bSAlexey Dobriyan #else
26754b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
26764b98d11bSAlexey Dobriyan {
26774b98d11bSAlexey Dobriyan }
26784b98d11bSAlexey Dobriyan 
26794b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
26804b98d11bSAlexey Dobriyan {
26814b98d11bSAlexey Dobriyan }
26824b98d11bSAlexey Dobriyan 
26834b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
26844b98d11bSAlexey Dobriyan {
26854b98d11bSAlexey Dobriyan }
26864b98d11bSAlexey Dobriyan 
26874b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
26884b98d11bSAlexey Dobriyan {
26894b98d11bSAlexey Dobriyan }
26904b98d11bSAlexey Dobriyan #endif
26914b98d11bSAlexey Dobriyan 
269282455257SDave Hansen #ifndef TASK_SIZE_OF
269382455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
269482455257SDave Hansen #endif
269582455257SDave Hansen 
2696cf475ad2SBalbir Singh #ifdef CONFIG_MM_OWNER
2697cf475ad2SBalbir Singh extern void mm_update_next_owner(struct mm_struct *mm);
2698cf475ad2SBalbir Singh extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2699cf475ad2SBalbir Singh #else
2700cf475ad2SBalbir Singh static inline void mm_update_next_owner(struct mm_struct *mm)
2701cf475ad2SBalbir Singh {
2702cf475ad2SBalbir Singh }
2703cf475ad2SBalbir Singh 
2704cf475ad2SBalbir Singh static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2705cf475ad2SBalbir Singh {
2706cf475ad2SBalbir Singh }
2707cf475ad2SBalbir Singh #endif /* CONFIG_MM_OWNER */
2708cf475ad2SBalbir Singh 
27093e10e716SJiri Slaby static inline unsigned long task_rlimit(const struct task_struct *tsk,
27103e10e716SJiri Slaby 		unsigned int limit)
27113e10e716SJiri Slaby {
27123e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
27133e10e716SJiri Slaby }
27143e10e716SJiri Slaby 
27153e10e716SJiri Slaby static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
27163e10e716SJiri Slaby 		unsigned int limit)
27173e10e716SJiri Slaby {
27183e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
27193e10e716SJiri Slaby }
27203e10e716SJiri Slaby 
27213e10e716SJiri Slaby static inline unsigned long rlimit(unsigned int limit)
27223e10e716SJiri Slaby {
27233e10e716SJiri Slaby 	return task_rlimit(current, limit);
27243e10e716SJiri Slaby }
27253e10e716SJiri Slaby 
27263e10e716SJiri Slaby static inline unsigned long rlimit_max(unsigned int limit)
27273e10e716SJiri Slaby {
27283e10e716SJiri Slaby 	return task_rlimit_max(current, limit);
27293e10e716SJiri Slaby }
27303e10e716SJiri Slaby 
27311da177e4SLinus Torvalds #endif
2732