xref: /linux/include/linux/sched.h (revision d37f761dbd276790f70dcf73a287fde2c3464482)
11da177e4SLinus Torvalds #ifndef _LINUX_SCHED_H
21da177e4SLinus Torvalds #define _LINUX_SCHED_H
31da177e4SLinus Torvalds 
4607ca46eSDavid Howells #include <uapi/linux/sched.h>
5b7b3c76aSDavid Woodhouse 
6b7b3c76aSDavid Woodhouse 
7b7b3c76aSDavid Woodhouse struct sched_param {
8b7b3c76aSDavid Woodhouse 	int sched_priority;
9b7b3c76aSDavid Woodhouse };
10b7b3c76aSDavid Woodhouse 
111da177e4SLinus Torvalds #include <asm/param.h>	/* for HZ */
121da177e4SLinus Torvalds 
131da177e4SLinus Torvalds #include <linux/capability.h>
141da177e4SLinus Torvalds #include <linux/threads.h>
151da177e4SLinus Torvalds #include <linux/kernel.h>
161da177e4SLinus Torvalds #include <linux/types.h>
171da177e4SLinus Torvalds #include <linux/timex.h>
181da177e4SLinus Torvalds #include <linux/jiffies.h>
191da177e4SLinus Torvalds #include <linux/rbtree.h>
201da177e4SLinus Torvalds #include <linux/thread_info.h>
211da177e4SLinus Torvalds #include <linux/cpumask.h>
221da177e4SLinus Torvalds #include <linux/errno.h>
231da177e4SLinus Torvalds #include <linux/nodemask.h>
24c92ff1bdSMartin Schwidefsky #include <linux/mm_types.h>
251da177e4SLinus Torvalds 
261da177e4SLinus Torvalds #include <asm/page.h>
271da177e4SLinus Torvalds #include <asm/ptrace.h>
281da177e4SLinus Torvalds #include <asm/cputime.h>
291da177e4SLinus Torvalds 
301da177e4SLinus Torvalds #include <linux/smp.h>
311da177e4SLinus Torvalds #include <linux/sem.h>
321da177e4SLinus Torvalds #include <linux/signal.h>
331da177e4SLinus Torvalds #include <linux/compiler.h>
341da177e4SLinus Torvalds #include <linux/completion.h>
351da177e4SLinus Torvalds #include <linux/pid.h>
361da177e4SLinus Torvalds #include <linux/percpu.h>
371da177e4SLinus Torvalds #include <linux/topology.h>
383e26c149SPeter Zijlstra #include <linux/proportions.h>
391da177e4SLinus Torvalds #include <linux/seccomp.h>
40e56d0903SIngo Molnar #include <linux/rcupdate.h>
4105725f7eSJiri Pirko #include <linux/rculist.h>
4223f78d4aSIngo Molnar #include <linux/rtmutex.h>
431da177e4SLinus Torvalds 
44a3b6714eSDavid Woodhouse #include <linux/time.h>
45a3b6714eSDavid Woodhouse #include <linux/param.h>
46a3b6714eSDavid Woodhouse #include <linux/resource.h>
47a3b6714eSDavid Woodhouse #include <linux/timer.h>
48a3b6714eSDavid Woodhouse #include <linux/hrtimer.h>
497c3ab738SAndrew Morton #include <linux/task_io_accounting.h>
509745512cSArjan van de Ven #include <linux/latencytop.h>
519e2b2dc4SDavid Howells #include <linux/cred.h>
52fa14ff4aSPeter Zijlstra #include <linux/llist.h>
537b44ab97SEric W. Biederman #include <linux/uidgid.h>
54a3b6714eSDavid Woodhouse 
55a3b6714eSDavid Woodhouse #include <asm/processor.h>
5636d57ac4SH. J. Lu 
571da177e4SLinus Torvalds struct exec_domain;
58c87e2837SIngo Molnar struct futex_pi_state;
59286100a6SAlexey Dobriyan struct robust_list_head;
60bddd87c7SAkinobu Mita struct bio_list;
615ad4e53bSAl Viro struct fs_struct;
62cdd6c482SIngo Molnar struct perf_event_context;
6373c10101SJens Axboe struct blk_plug;
641da177e4SLinus Torvalds 
651da177e4SLinus Torvalds /*
661da177e4SLinus Torvalds  * List of flags we want to share for kernel threads,
671da177e4SLinus Torvalds  * if only because they are not used by them anyway.
681da177e4SLinus Torvalds  */
691da177e4SLinus Torvalds #define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
701da177e4SLinus Torvalds 
711da177e4SLinus Torvalds /*
721da177e4SLinus Torvalds  * These are the constants used to fake the fixed-point load-average
731da177e4SLinus Torvalds  * counting. Some notes:
741da177e4SLinus Torvalds  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
751da177e4SLinus Torvalds  *    a load-average precision of 10 bits integer + 11 bits fractional
761da177e4SLinus Torvalds  *  - if you want to count load-averages more often, you need more
771da177e4SLinus Torvalds  *    precision, or rounding will get you. With 2-second counting freq,
781da177e4SLinus Torvalds  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
791da177e4SLinus Torvalds  *    11 bit fractions.
801da177e4SLinus Torvalds  */
811da177e4SLinus Torvalds extern unsigned long avenrun[];		/* Load averages */
822d02494fSThomas Gleixner extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
831da177e4SLinus Torvalds 
841da177e4SLinus Torvalds #define FSHIFT		11		/* nr of bits of precision */
851da177e4SLinus Torvalds #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
860c2043abSLinus Torvalds #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
871da177e4SLinus Torvalds #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
881da177e4SLinus Torvalds #define EXP_5		2014		/* 1/exp(5sec/5min) */
891da177e4SLinus Torvalds #define EXP_15		2037		/* 1/exp(5sec/15min) */
901da177e4SLinus Torvalds 
911da177e4SLinus Torvalds #define CALC_LOAD(load,exp,n) \
921da177e4SLinus Torvalds 	load *= exp; \
931da177e4SLinus Torvalds 	load += n*(FIXED_1-exp); \
941da177e4SLinus Torvalds 	load >>= FSHIFT;
951da177e4SLinus Torvalds 
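/*
 * Illustrative sketch (not part of this header): how the fixed-point
 * CALC_LOAD() update above behaves over time.  This is a stand-alone
 * userspace program that mirrors FSHIFT/FIXED_1/EXP_1 under local EX_*
 * names; the figure of three runnable tasks per sample is made up.
 */
#include <stdio.h>

#define EX_FSHIFT	11
#define EX_FIXED_1	(1 << EX_FSHIFT)
#define EX_EXP_1	1884			/* 1/exp(5sec/1min) */

int main(void)
{
	unsigned long load = 0;			/* 1-minute average, fixed point */
	unsigned long active = 3 * EX_FIXED_1;	/* 3 runnable tasks each sample */
	int i;

	for (i = 1; i <= 24; i++) {		/* 24 samples = 2 minutes at LOAD_FREQ */
		/* same arithmetic as CALC_LOAD(load, EXP_1, active) */
		load *= EX_EXP_1;
		load += active * (EX_FIXED_1 - EX_EXP_1);
		load >>= EX_FSHIFT;
		printf("after %3ds: %lu.%02lu\n", i * 5,
		       load >> EX_FSHIFT,
		       ((load & (EX_FIXED_1 - 1)) * 100) >> EX_FSHIFT);
	}
	return 0;				/* climbs towards 3.00 */
}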
961da177e4SLinus Torvalds extern unsigned long total_forks;
971da177e4SLinus Torvalds extern int nr_threads;
981da177e4SLinus Torvalds DECLARE_PER_CPU(unsigned long, process_counts);
991da177e4SLinus Torvalds extern int nr_processes(void);
1001da177e4SLinus Torvalds extern unsigned long nr_running(void);
1011da177e4SLinus Torvalds extern unsigned long nr_uninterruptible(void);
1021da177e4SLinus Torvalds extern unsigned long nr_iowait(void);
1038c215bd3SPeter Zijlstra extern unsigned long nr_iowait_cpu(int cpu);
10469d25870SArjan van de Ven extern unsigned long this_cpu_load(void);
10569d25870SArjan van de Ven 
10669d25870SArjan van de Ven 
1070f004f5aSPeter Zijlstra extern void calc_global_load(unsigned long ticks);
1085aaa0b7aSPeter Zijlstra extern void update_cpu_load_nohz(void);
1091da177e4SLinus Torvalds 
1107e49fcceSSteven Rostedt extern unsigned long get_parent_ip(unsigned long addr);
1117e49fcceSSteven Rostedt 
11243ae34cbSIngo Molnar struct seq_file;
11343ae34cbSIngo Molnar struct cfs_rq;
1144cf86d77SIngo Molnar struct task_group;
11543ae34cbSIngo Molnar #ifdef CONFIG_SCHED_DEBUG
11643ae34cbSIngo Molnar extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
11743ae34cbSIngo Molnar extern void proc_sched_set_task(struct task_struct *p);
11843ae34cbSIngo Molnar extern void
1195cef9ecaSIngo Molnar print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
12043ae34cbSIngo Molnar #else
12143ae34cbSIngo Molnar static inline void
12243ae34cbSIngo Molnar proc_sched_show_task(struct task_struct *p, struct seq_file *m)
12343ae34cbSIngo Molnar {
12443ae34cbSIngo Molnar }
12543ae34cbSIngo Molnar static inline void proc_sched_set_task(struct task_struct *p)
12643ae34cbSIngo Molnar {
12743ae34cbSIngo Molnar }
12843ae34cbSIngo Molnar static inline void
1295cef9ecaSIngo Molnar print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
13043ae34cbSIngo Molnar {
13143ae34cbSIngo Molnar }
13243ae34cbSIngo Molnar #endif
1331da177e4SLinus Torvalds 
1344a8342d2SLinus Torvalds /*
1354a8342d2SLinus Torvalds  * Task state bitmask. NOTE! These bits are also
1364a8342d2SLinus Torvalds  * encoded in fs/proc/array.c: get_task_state().
1374a8342d2SLinus Torvalds  *
1384a8342d2SLinus Torvalds  * We have two separate sets of flags: task->state
1394a8342d2SLinus Torvalds  * is about runnability, while task->exit_state is
1404a8342d2SLinus Torvalds  * about the task exiting. Confusing, but this way
1414a8342d2SLinus Torvalds  * modifying one set can't modify the other one by
1424a8342d2SLinus Torvalds  * mistake.
1434a8342d2SLinus Torvalds  */
1441da177e4SLinus Torvalds #define TASK_RUNNING		0
1451da177e4SLinus Torvalds #define TASK_INTERRUPTIBLE	1
1461da177e4SLinus Torvalds #define TASK_UNINTERRUPTIBLE	2
147f021a3c2SMatthew Wilcox #define __TASK_STOPPED		4
148f021a3c2SMatthew Wilcox #define __TASK_TRACED		8
1494a8342d2SLinus Torvalds /* in tsk->exit_state */
1504a8342d2SLinus Torvalds #define EXIT_ZOMBIE		16
1514a8342d2SLinus Torvalds #define EXIT_DEAD		32
1524a8342d2SLinus Torvalds /* in tsk->state again */
153af927232SMike Galbraith #define TASK_DEAD		64
154f021a3c2SMatthew Wilcox #define TASK_WAKEKILL		128
155e9c84311SPeter Zijlstra #define TASK_WAKING		256
156e1781538SPeter Zijlstra #define TASK_STATE_MAX		512
157f021a3c2SMatthew Wilcox 
15844d90df6SPeter Zijlstra #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
15973342151SPeter Zijlstra 
160e1781538SPeter Zijlstra extern char ___assert_task_state[1 - 2*!!(
161e1781538SPeter Zijlstra 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
162f021a3c2SMatthew Wilcox 
163f021a3c2SMatthew Wilcox /* Convenience macros for the sake of set_task_state */
164f021a3c2SMatthew Wilcox #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
165f021a3c2SMatthew Wilcox #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
166f021a3c2SMatthew Wilcox #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
1671da177e4SLinus Torvalds 
16892a1f4bcSMatthew Wilcox /* Convenience macros for the sake of wake_up */
16992a1f4bcSMatthew Wilcox #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
170f021a3c2SMatthew Wilcox #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
17192a1f4bcSMatthew Wilcox 
17292a1f4bcSMatthew Wilcox /* get_task_state() */
17392a1f4bcSMatthew Wilcox #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
174f021a3c2SMatthew Wilcox 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
175f021a3c2SMatthew Wilcox 				 __TASK_TRACED)
17692a1f4bcSMatthew Wilcox 
177f021a3c2SMatthew Wilcox #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
178f021a3c2SMatthew Wilcox #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
1798f92054eSDavid Howells #define task_is_dead(task)	((task)->exit_state != 0)
18092a1f4bcSMatthew Wilcox #define task_is_stopped_or_traced(task)	\
181f021a3c2SMatthew Wilcox 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
18292a1f4bcSMatthew Wilcox #define task_contributes_to_load(task)	\
183e3c8ca83SNathan Lynch 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
184376fede8STejun Heo 				 (task->flags & PF_FROZEN) == 0)
1851da177e4SLinus Torvalds 
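/*
 * Illustrative sketch (not part of this header): decoding a task state
 * word into the single letter of TASK_STATE_TO_CHAR_STR, roughly the way
 * fs/proc/array.c:get_task_state() walks the bits.  Stand-alone userspace
 * code; the sample inputs below are the literal bit values defined above.
 */
#include <stdio.h>

static char task_state_char(unsigned long state)
{
	const char *str = "RSDTtZXxKW";	/* TASK_STATE_TO_CHAR_STR */
	int idx = 0;

	while (state) {			/* position of the highest set bit */
		idx++;
		state >>= 1;
	}
	return str[idx];
}

int main(void)
{
	printf("%c\n", task_state_char(0));	/* TASK_RUNNING         -> 'R' */
	printf("%c\n", task_state_char(1));	/* TASK_INTERRUPTIBLE   -> 'S' */
	printf("%c\n", task_state_char(2));	/* TASK_UNINTERRUPTIBLE -> 'D' */
	printf("%c\n", task_state_char(16));	/* EXIT_ZOMBIE          -> 'Z' */
	return 0;
}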
1861da177e4SLinus Torvalds #define __set_task_state(tsk, state_value)		\
1871da177e4SLinus Torvalds 	do { (tsk)->state = (state_value); } while (0)
1881da177e4SLinus Torvalds #define set_task_state(tsk, state_value)		\
1891da177e4SLinus Torvalds 	set_mb((tsk)->state, (state_value))
1901da177e4SLinus Torvalds 
191498d0c57SAndrew Morton /*
192498d0c57SAndrew Morton  * set_current_state() includes a barrier so that the write of current->state
193498d0c57SAndrew Morton  * is correctly serialised wrt the caller's subsequent test of whether to
194498d0c57SAndrew Morton  * actually sleep:
195498d0c57SAndrew Morton  *
196498d0c57SAndrew Morton  *	set_current_state(TASK_UNINTERRUPTIBLE);
197498d0c57SAndrew Morton  *	if (do_i_need_to_sleep())
198498d0c57SAndrew Morton  *		schedule();
199498d0c57SAndrew Morton  *
200498d0c57SAndrew Morton  * If the caller does not need such serialisation then use __set_current_state()
201498d0c57SAndrew Morton  */
2021da177e4SLinus Torvalds #define __set_current_state(state_value)			\
2031da177e4SLinus Torvalds 	do { current->state = (state_value); } while (0)
2041da177e4SLinus Torvalds #define set_current_state(state_value)		\
2051da177e4SLinus Torvalds 	set_mb(current->state, (state_value))
2061da177e4SLinus Torvalds 
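/*
 * Illustrative sketch (not part of this header): the canonical sleep loop
 * the comment above describes, as it would look inside kernel code.  The
 * condition flag and the function name are hypothetical; the waker is
 * expected to set the flag and then call wake_up_process() on the sleeper.
 */
#include <linux/sched.h>

static void example_wait_for_flag(volatile int *flag)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (*flag)		/* condition checked after the state write */
			break;
		schedule();		/* really sleep; woken by wake_up_process() */
	}
	__set_current_state(TASK_RUNNING);	/* no barrier needed on the way out */
}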
2071da177e4SLinus Torvalds /* Task command name length */
2081da177e4SLinus Torvalds #define TASK_COMM_LEN 16
2091da177e4SLinus Torvalds 
2101da177e4SLinus Torvalds #include <linux/spinlock.h>
2111da177e4SLinus Torvalds 
2121da177e4SLinus Torvalds /*
2131da177e4SLinus Torvalds  * This serializes "schedule()" and also protects
2141da177e4SLinus Torvalds  * the run-queue from deletions/modifications (but
2151da177e4SLinus Torvalds  * _adding_ to the beginning of the run-queue has
2161da177e4SLinus Torvalds  * a separate lock).
2171da177e4SLinus Torvalds  */
2181da177e4SLinus Torvalds extern rwlock_t tasklist_lock;
2191da177e4SLinus Torvalds extern spinlock_t mmlist_lock;
2201da177e4SLinus Torvalds 
22136c8b586SIngo Molnar struct task_struct;
2221da177e4SLinus Torvalds 
223db1466b3SPaul E. McKenney #ifdef CONFIG_PROVE_RCU
224db1466b3SPaul E. McKenney extern int lockdep_tasklist_lock_is_held(void);
225db1466b3SPaul E. McKenney #endif /* #ifdef CONFIG_PROVE_RCU */
226db1466b3SPaul E. McKenney 
2271da177e4SLinus Torvalds extern void sched_init(void);
2281da177e4SLinus Torvalds extern void sched_init_smp(void);
2292d07b255SHarvey Harrison extern asmlinkage void schedule_tail(struct task_struct *prev);
23036c8b586SIngo Molnar extern void init_idle(struct task_struct *idle, int cpu);
2311df21055SIngo Molnar extern void init_idle_bootup_task(struct task_struct *idle);
2321da177e4SLinus Torvalds 
23389f19f04SAndrew Morton extern int runqueue_is_locked(int cpu);
234017730c1SIngo Molnar 
23546cb4b7cSSiddha, Suresh B #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
236c1cc017cSAlex Shi extern void nohz_balance_enter_idle(int cpu);
23769e1e811SSuresh Siddha extern void set_cpu_sd_state_idle(void);
23883cd4fe2SVenkatesh Pallipadi extern int get_nohz_timer_target(void);
23946cb4b7cSSiddha, Suresh B #else
240c1cc017cSAlex Shi static inline void nohz_balance_enter_idle(int cpu) { }
241fdaabd80SPeter Zijlstra static inline void set_cpu_sd_state_idle(void) { }
24246cb4b7cSSiddha, Suresh B #endif
2431da177e4SLinus Torvalds 
244e59e2ae2SIngo Molnar /*
24539bc89fdSIngo Molnar  * Only dump TASK_* tasks. (0 for all tasks)
246e59e2ae2SIngo Molnar  */
247e59e2ae2SIngo Molnar extern void show_state_filter(unsigned long state_filter);
248e59e2ae2SIngo Molnar 
249e59e2ae2SIngo Molnar static inline void show_state(void)
250e59e2ae2SIngo Molnar {
25139bc89fdSIngo Molnar 	show_state_filter(0);
252e59e2ae2SIngo Molnar }
253e59e2ae2SIngo Molnar 
2541da177e4SLinus Torvalds extern void show_regs(struct pt_regs *);
2551da177e4SLinus Torvalds 
2561da177e4SLinus Torvalds /*
2571da177e4SLinus Torvalds  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
2581da177e4SLinus Torvalds  * task), SP is the stack pointer of the first frame that should be shown in the back
2591da177e4SLinus Torvalds  * trace (or NULL if the entire call-chain of the task should be shown).
2601da177e4SLinus Torvalds  */
2611da177e4SLinus Torvalds extern void show_stack(struct task_struct *task, unsigned long *sp);
2621da177e4SLinus Torvalds 
2631da177e4SLinus Torvalds void io_schedule(void);
2641da177e4SLinus Torvalds long io_schedule_timeout(long timeout);
2651da177e4SLinus Torvalds 
2661da177e4SLinus Torvalds extern void cpu_init (void);
2671da177e4SLinus Torvalds extern void trap_init(void);
2681da177e4SLinus Torvalds extern void update_process_times(int user);
2691da177e4SLinus Torvalds extern void scheduler_tick(void);
2701da177e4SLinus Torvalds 
27182a1fcb9SIngo Molnar extern void sched_show_task(struct task_struct *p);
27282a1fcb9SIngo Molnar 
27319cc36c0SFrederic Weisbecker #ifdef CONFIG_LOCKUP_DETECTOR
2748446f1d3SIngo Molnar extern void touch_softlockup_watchdog(void);
275d6ad3e28SJason Wessel extern void touch_softlockup_watchdog_sync(void);
27604c9167fSJeremy Fitzhardinge extern void touch_all_softlockup_watchdogs(void);
277332fbdbcSDon Zickus extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
2788d65af78SAlexey Dobriyan 				  void __user *buffer,
279baf48f65SMandeep Singh Baines 				  size_t *lenp, loff_t *ppos);
2809c44bc03SIngo Molnar extern unsigned int  softlockup_panic;
281004417a6SPeter Zijlstra void lockup_detector_init(void);
2828446f1d3SIngo Molnar #else
2838446f1d3SIngo Molnar static inline void touch_softlockup_watchdog(void)
2848446f1d3SIngo Molnar {
2858446f1d3SIngo Molnar }
286d6ad3e28SJason Wessel static inline void touch_softlockup_watchdog_sync(void)
287d6ad3e28SJason Wessel {
288d6ad3e28SJason Wessel }
28904c9167fSJeremy Fitzhardinge static inline void touch_all_softlockup_watchdogs(void)
29004c9167fSJeremy Fitzhardinge {
29104c9167fSJeremy Fitzhardinge }
292004417a6SPeter Zijlstra static inline void lockup_detector_init(void)
293004417a6SPeter Zijlstra {
294004417a6SPeter Zijlstra }
2958446f1d3SIngo Molnar #endif
2968446f1d3SIngo Molnar 
297e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
298e162b39aSMandeep Singh Baines extern unsigned int  sysctl_hung_task_panic;
299e162b39aSMandeep Singh Baines extern unsigned long sysctl_hung_task_check_count;
300e162b39aSMandeep Singh Baines extern unsigned long sysctl_hung_task_timeout_secs;
301e162b39aSMandeep Singh Baines extern unsigned long sysctl_hung_task_warnings;
302e162b39aSMandeep Singh Baines extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
3038d65af78SAlexey Dobriyan 					 void __user *buffer,
304e162b39aSMandeep Singh Baines 					 size_t *lenp, loff_t *ppos);
305e4ecda1bSMark Lord #else
306e4ecda1bSMark Lord /* Avoid need for ifdefs elsewhere in the code */
307e4ecda1bSMark Lord enum { sysctl_hung_task_timeout_secs = 0 };
308e162b39aSMandeep Singh Baines #endif
3098446f1d3SIngo Molnar 
3101da177e4SLinus Torvalds /* Attach to any functions which should be ignored in wchan output. */
3111da177e4SLinus Torvalds #define __sched		__attribute__((__section__(".sched.text")))
312deaf2227SIngo Molnar 
313deaf2227SIngo Molnar /* Linker adds these: start and end of __sched functions */
314deaf2227SIngo Molnar extern char __sched_text_start[], __sched_text_end[];
315deaf2227SIngo Molnar 
3161da177e4SLinus Torvalds /* Is this address in the __sched functions? */
3171da177e4SLinus Torvalds extern int in_sched_functions(unsigned long addr);
3181da177e4SLinus Torvalds 
3191da177e4SLinus Torvalds #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
320b3c97528SHarvey Harrison extern signed long schedule_timeout(signed long timeout);
32164ed93a2SNishanth Aravamudan extern signed long schedule_timeout_interruptible(signed long timeout);
322294d5cc2SMatthew Wilcox extern signed long schedule_timeout_killable(signed long timeout);
32364ed93a2SNishanth Aravamudan extern signed long schedule_timeout_uninterruptible(signed long timeout);
3241da177e4SLinus Torvalds asmlinkage void schedule(void);
325c5491ea7SThomas Gleixner extern void schedule_preempt_disabled(void);
326c6eb3ddaSPeter Zijlstra extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
3271da177e4SLinus Torvalds 
328ab516013SSerge E. Hallyn struct nsproxy;
329acce292cSCedric Le Goater struct user_namespace;
3301da177e4SLinus Torvalds 
331341c87bfSKAMEZAWA Hiroyuki /*
332341c87bfSKAMEZAWA Hiroyuki  * Default maximum number of active map areas; this limits the number of vmas
333341c87bfSKAMEZAWA Hiroyuki  * per mm struct. Users can override this number via sysctl, but there is a
334341c87bfSKAMEZAWA Hiroyuki  * problem.
335341c87bfSKAMEZAWA Hiroyuki  *
336341c87bfSKAMEZAWA Hiroyuki  * When a program's coredump is generated in ELF format, one section is created
337341c87bfSKAMEZAWA Hiroyuki  * per vma. In ELF, the number of sections is represented as an unsigned short.
338341c87bfSKAMEZAWA Hiroyuki  * This means the number of sections must be smaller than 65535 in a coredump.
339341c87bfSKAMEZAWA Hiroyuki  * Because the kernel adds some informative sections to the image of the program
340341c87bfSKAMEZAWA Hiroyuki  * when generating the coredump, we need some margin. The number of extra sections
341341c87bfSKAMEZAWA Hiroyuki  * is currently 1-3 and depends on the arch. We use "5" as a safe margin here.
342341c87bfSKAMEZAWA Hiroyuki  */
343341c87bfSKAMEZAWA Hiroyuki #define MAPCOUNT_ELF_CORE_MARGIN	(5)
3444be929beSAlexey Dobriyan #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
3451da177e4SLinus Torvalds 
3461da177e4SLinus Torvalds extern int sysctl_max_map_count;
3471da177e4SLinus Torvalds 
3481da177e4SLinus Torvalds #include <linux/aio.h>
3491da177e4SLinus Torvalds 
350efc1a3b1SDavid Howells #ifdef CONFIG_MMU
351efc1a3b1SDavid Howells extern void arch_pick_mmap_layout(struct mm_struct *mm);
3521da177e4SLinus Torvalds extern unsigned long
3531da177e4SLinus Torvalds arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
3541da177e4SLinus Torvalds 		       unsigned long, unsigned long);
3551da177e4SLinus Torvalds extern unsigned long
3561da177e4SLinus Torvalds arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
3571da177e4SLinus Torvalds 			  unsigned long len, unsigned long pgoff,
3581da177e4SLinus Torvalds 			  unsigned long flags);
3591363c3cdSWolfgang Wander extern void arch_unmap_area(struct mm_struct *, unsigned long);
3601363c3cdSWolfgang Wander extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
361efc1a3b1SDavid Howells #else
362efc1a3b1SDavid Howells static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
363efc1a3b1SDavid Howells #endif
3641da177e4SLinus Torvalds 
365901608d9SOleg Nesterov 
3666c5d5238SKawai, Hidehiro extern void set_dumpable(struct mm_struct *mm, int value);
3676c5d5238SKawai, Hidehiro extern int get_dumpable(struct mm_struct *mm);
3686c5d5238SKawai, Hidehiro 
36954b50199SKees Cook /* get/set_dumpable() values */
37054b50199SKees Cook #define SUID_DUMPABLE_DISABLED	0
37154b50199SKees Cook #define SUID_DUMPABLE_ENABLED	1
37254b50199SKees Cook #define SUID_DUMPABLE_SAFE	2
37354b50199SKees Cook 
3746c5d5238SKawai, Hidehiro /* mm flags */
3753cb4a0bbSKawai, Hidehiro /* dumpable bits */
3766c5d5238SKawai, Hidehiro #define MMF_DUMPABLE      0  /* core dump is permitted */
3776c5d5238SKawai, Hidehiro #define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
378f8af4da3SHugh Dickins 
3793cb4a0bbSKawai, Hidehiro #define MMF_DUMPABLE_BITS 2
380f8af4da3SHugh Dickins #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
3813cb4a0bbSKawai, Hidehiro 
3823cb4a0bbSKawai, Hidehiro /* coredump filter bits */
3833cb4a0bbSKawai, Hidehiro #define MMF_DUMP_ANON_PRIVATE	2
3843cb4a0bbSKawai, Hidehiro #define MMF_DUMP_ANON_SHARED	3
3853cb4a0bbSKawai, Hidehiro #define MMF_DUMP_MAPPED_PRIVATE	4
3863cb4a0bbSKawai, Hidehiro #define MMF_DUMP_MAPPED_SHARED	5
38782df3973SRoland McGrath #define MMF_DUMP_ELF_HEADERS	6
388e575f111SKOSAKI Motohiro #define MMF_DUMP_HUGETLB_PRIVATE 7
389e575f111SKOSAKI Motohiro #define MMF_DUMP_HUGETLB_SHARED  8
390f8af4da3SHugh Dickins 
3913cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
392e575f111SKOSAKI Motohiro #define MMF_DUMP_FILTER_BITS	7
3933cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_MASK \
3943cb4a0bbSKawai, Hidehiro 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
3953cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_DEFAULT \
396e575f111SKOSAKI Motohiro 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
397656eb2cdSRoland McGrath 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
398656eb2cdSRoland McGrath 
399656eb2cdSRoland McGrath #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
400656eb2cdSRoland McGrath # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
401656eb2cdSRoland McGrath #else
402656eb2cdSRoland McGrath # define MMF_DUMP_MASK_DEFAULT_ELF	0
403656eb2cdSRoland McGrath #endif
404f8af4da3SHugh Dickins 					/* leave room for more dump flags */
405f8af4da3SHugh Dickins #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
406ba76149fSAndrea Arcangeli #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
407bafb282dSKonstantin Khlebnikov #define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
408f8af4da3SHugh Dickins 
4099f68f672SOleg Nesterov #define MMF_HAS_UPROBES		19	/* has uprobes */
4109f68f672SOleg Nesterov #define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
411f8ac4ec9SOleg Nesterov 
412f8af4da3SHugh Dickins #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
4136c5d5238SKawai, Hidehiro 
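/*
 * Illustrative sketch (not part of this header): how the dump-filter bits
 * above map onto the value reported via /proc/<pid>/coredump_filter.
 * Stand-alone userspace code mirroring the definitions under local EX_*
 * names and assuming CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y.
 */
#include <stdio.h>

#define EX_MMF_DUMPABLE_BITS		2
#define EX_MMF_DUMP_ANON_PRIVATE	2
#define EX_MMF_DUMP_ANON_SHARED		3
#define EX_MMF_DUMP_ELF_HEADERS		6
#define EX_MMF_DUMP_HUGETLB_PRIVATE	7
#define EX_MMF_DUMP_FILTER_SHIFT	EX_MMF_DUMPABLE_BITS
#define EX_MMF_DUMP_FILTER_BITS		7
#define EX_MMF_DUMP_FILTER_MASK \
	(((1 << EX_MMF_DUMP_FILTER_BITS) - 1) << EX_MMF_DUMP_FILTER_SHIFT)
#define EX_MMF_DUMP_FILTER_DEFAULT \
	((1 << EX_MMF_DUMP_ANON_PRIVATE) | (1 << EX_MMF_DUMP_ANON_SHARED) | \
	 (1 << EX_MMF_DUMP_HUGETLB_PRIVATE) | (1 << EX_MMF_DUMP_ELF_HEADERS))

int main(void)
{
	unsigned long mm_flags = EX_MMF_DUMP_FILTER_DEFAULT;

	/* The filter bits, shifted down past the dumpable bits. */
	printf("coredump_filter: %08lx\n",
	       (mm_flags & EX_MMF_DUMP_FILTER_MASK) >> EX_MMF_DUMP_FILTER_SHIFT);
	return 0;	/* prints 00000033: anon private/shared, ELF headers, hugetlb private */
}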
4141da177e4SLinus Torvalds struct sighand_struct {
4151da177e4SLinus Torvalds 	atomic_t		count;
4161da177e4SLinus Torvalds 	struct k_sigaction	action[_NSIG];
4171da177e4SLinus Torvalds 	spinlock_t		siglock;
418b8fceee1SDavide Libenzi 	wait_queue_head_t	signalfd_wqh;
4191da177e4SLinus Torvalds };
4201da177e4SLinus Torvalds 
4210e464814SKaiGai Kohei struct pacct_struct {
422f6ec29a4SKaiGai Kohei 	int			ac_flag;
423f6ec29a4SKaiGai Kohei 	long			ac_exitcode;
4240e464814SKaiGai Kohei 	unsigned long		ac_mem;
42577787bfbSKaiGai Kohei 	cputime_t		ac_utime, ac_stime;
42677787bfbSKaiGai Kohei 	unsigned long		ac_minflt, ac_majflt;
4270e464814SKaiGai Kohei };
4280e464814SKaiGai Kohei 
42942c4ab41SStanislaw Gruszka struct cpu_itimer {
43042c4ab41SStanislaw Gruszka 	cputime_t expires;
43142c4ab41SStanislaw Gruszka 	cputime_t incr;
4328356b5f9SStanislaw Gruszka 	u32 error;
4338356b5f9SStanislaw Gruszka 	u32 incr_error;
43442c4ab41SStanislaw Gruszka };
43542c4ab41SStanislaw Gruszka 
436f06febc9SFrank Mayhar /**
437*d37f761dSFrederic Weisbecker  * struct cputime - snapshot of system and user cputime
438*d37f761dSFrederic Weisbecker  * @utime: time spent in user mode
439*d37f761dSFrederic Weisbecker  * @stime: time spent in system mode
440*d37f761dSFrederic Weisbecker  *
441*d37f761dSFrederic Weisbecker  * Gathers a generic snapshot of user and system time.
442*d37f761dSFrederic Weisbecker  */
443*d37f761dSFrederic Weisbecker struct cputime {
444*d37f761dSFrederic Weisbecker 	cputime_t utime;
445*d37f761dSFrederic Weisbecker 	cputime_t stime;
446*d37f761dSFrederic Weisbecker };
447*d37f761dSFrederic Weisbecker 
448*d37f761dSFrederic Weisbecker /**
449f06febc9SFrank Mayhar  * struct task_cputime - collected CPU time counts
450f06febc9SFrank Mayhar  * @utime:		time spent in user mode, in &cputime_t units
451f06febc9SFrank Mayhar  * @stime:		time spent in kernel mode, in &cputime_t units
452f06febc9SFrank Mayhar  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
453f06febc9SFrank Mayhar  *
454*d37f761dSFrederic Weisbecker  * This is an extension of struct cputime that includes the total runtime
455*d37f761dSFrederic Weisbecker  * spent by the task from the scheduler point of view.
456*d37f761dSFrederic Weisbecker  *
457*d37f761dSFrederic Weisbecker  * As a result, this structure groups together three kinds of CPU time
458*d37f761dSFrederic Weisbecker  * that are tracked for threads and thread groups.  Most things considering
459f06febc9SFrank Mayhar  * CPU time want to group these counts together and treat all three
460f06febc9SFrank Mayhar  * of them in parallel.
461f06febc9SFrank Mayhar  */
462f06febc9SFrank Mayhar struct task_cputime {
463f06febc9SFrank Mayhar 	cputime_t utime;
464f06febc9SFrank Mayhar 	cputime_t stime;
465f06febc9SFrank Mayhar 	unsigned long long sum_exec_runtime;
466f06febc9SFrank Mayhar };
467f06febc9SFrank Mayhar /* Alternate field names when used to cache expirations. */
468f06febc9SFrank Mayhar #define prof_exp	stime
469f06febc9SFrank Mayhar #define virt_exp	utime
470f06febc9SFrank Mayhar #define sched_exp	sum_exec_runtime
471f06febc9SFrank Mayhar 
4724cd4c1b4SPeter Zijlstra #define INIT_CPUTIME	\
4734cd4c1b4SPeter Zijlstra 	(struct task_cputime) {					\
47464861634SMartin Schwidefsky 		.utime = 0,					\
47564861634SMartin Schwidefsky 		.stime = 0,					\
4764cd4c1b4SPeter Zijlstra 		.sum_exec_runtime = 0,				\
4774cd4c1b4SPeter Zijlstra 	}
4784cd4c1b4SPeter Zijlstra 
479c99e6efeSPeter Zijlstra /*
480c99e6efeSPeter Zijlstra  * Disable preemption until the scheduler is running.
481c99e6efeSPeter Zijlstra  * Reset by start_kernel()->sched_init()->init_idle().
482d86ee480SPeter Zijlstra  *
483d86ee480SPeter Zijlstra  * We include PREEMPT_ACTIVE to prevent cond_resched() from working
484d86ee480SPeter Zijlstra  * before the scheduler is active -- see should_resched().
485c99e6efeSPeter Zijlstra  */
486d86ee480SPeter Zijlstra #define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
487c99e6efeSPeter Zijlstra 
488f06febc9SFrank Mayhar /**
4894cd4c1b4SPeter Zijlstra  * struct thread_group_cputimer - thread group interval timer counts
4904cd4c1b4SPeter Zijlstra  * @cputime:		thread group interval timers.
4914cd4c1b4SPeter Zijlstra  * @running:		non-zero when there are timers running and
4924cd4c1b4SPeter Zijlstra  * 			@cputime receives updates.
4934cd4c1b4SPeter Zijlstra  * @lock:		lock for fields in this struct.
494f06febc9SFrank Mayhar  *
495f06febc9SFrank Mayhar  * This structure contains the version of task_cputime, above, that is
4964cd4c1b4SPeter Zijlstra  * used for thread group CPU timer calculations.
497f06febc9SFrank Mayhar  */
4984cd4c1b4SPeter Zijlstra struct thread_group_cputimer {
4994cd4c1b4SPeter Zijlstra 	struct task_cputime cputime;
5004cd4c1b4SPeter Zijlstra 	int running;
501ee30a7b2SThomas Gleixner 	raw_spinlock_t lock;
502f06febc9SFrank Mayhar };
503f06febc9SFrank Mayhar 
5044714d1d3SBen Blum #include <linux/rwsem.h>
5055091faa4SMike Galbraith struct autogroup;
5065091faa4SMike Galbraith 
5071da177e4SLinus Torvalds /*
508e815f0a8SJonathan Neuschäfer  * NOTE! "signal_struct" does not have its own
5091da177e4SLinus Torvalds  * locking, because a shared signal_struct always
5101da177e4SLinus Torvalds  * implies a shared sighand_struct, so locking
5111da177e4SLinus Torvalds  * sighand_struct is always a proper superset of
5121da177e4SLinus Torvalds  * the locking of signal_struct.
5131da177e4SLinus Torvalds  */
5141da177e4SLinus Torvalds struct signal_struct {
515ea6d290cSOleg Nesterov 	atomic_t		sigcnt;
5161da177e4SLinus Torvalds 	atomic_t		live;
517b3ac022cSOleg Nesterov 	int			nr_threads;
5181da177e4SLinus Torvalds 
5191da177e4SLinus Torvalds 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
5201da177e4SLinus Torvalds 
5211da177e4SLinus Torvalds 	/* current thread group signal load-balancing target: */
52236c8b586SIngo Molnar 	struct task_struct	*curr_target;
5231da177e4SLinus Torvalds 
5241da177e4SLinus Torvalds 	/* shared signal handling: */
5251da177e4SLinus Torvalds 	struct sigpending	shared_pending;
5261da177e4SLinus Torvalds 
5271da177e4SLinus Torvalds 	/* thread group exit support */
5281da177e4SLinus Torvalds 	int			group_exit_code;
5291da177e4SLinus Torvalds 	/* overloaded:
5301da177e4SLinus Torvalds 	 * - notify group_exit_task when ->count is equal to notify_count
5311da177e4SLinus Torvalds 	 * - everyone except group_exit_task is stopped during signal delivery
5321da177e4SLinus Torvalds 	 *   of fatal signals, group_exit_task processes the signal.
5331da177e4SLinus Torvalds 	 */
5341da177e4SLinus Torvalds 	int			notify_count;
53507dd20e0SRichard Kennedy 	struct task_struct	*group_exit_task;
5361da177e4SLinus Torvalds 
5371da177e4SLinus Torvalds 	/* thread group stop support, overloads group_exit_code too */
5381da177e4SLinus Torvalds 	int			group_stop_count;
5391da177e4SLinus Torvalds 	unsigned int		flags; /* see SIGNAL_* flags below */
5401da177e4SLinus Torvalds 
541ebec18a6SLennart Poettering 	/*
542ebec18a6SLennart Poettering 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
543ebec18a6SLennart Poettering 	 * manager, to re-parent orphan (double-forking) child processes
544ebec18a6SLennart Poettering 	 * to this process instead of 'init'. The service manager is
545ebec18a6SLennart Poettering 	 * able to receive SIGCHLD signals and is able to investigate
546ebec18a6SLennart Poettering 	 * the process until it calls wait(). All children of this
547ebec18a6SLennart Poettering 	 * process will inherit a flag if they should look for a
548ebec18a6SLennart Poettering 	 * child_subreaper process at exit.
549ebec18a6SLennart Poettering 	 */
550ebec18a6SLennart Poettering 	unsigned int		is_child_subreaper:1;
551ebec18a6SLennart Poettering 	unsigned int		has_child_subreaper:1;
552ebec18a6SLennart Poettering 
5531da177e4SLinus Torvalds 	/* POSIX.1b Interval Timers */
5541da177e4SLinus Torvalds 	struct list_head posix_timers;
5551da177e4SLinus Torvalds 
5561da177e4SLinus Torvalds 	/* ITIMER_REAL timer for the process */
5572ff678b8SThomas Gleixner 	struct hrtimer real_timer;
558fea9d175SOleg Nesterov 	struct pid *leader_pid;
5592ff678b8SThomas Gleixner 	ktime_t it_real_incr;
5601da177e4SLinus Torvalds 
56142c4ab41SStanislaw Gruszka 	/*
56242c4ab41SStanislaw Gruszka 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
56342c4ab41SStanislaw Gruszka 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
56442c4ab41SStanislaw Gruszka 	 * values are defined as 0 and 1 respectively.
56542c4ab41SStanislaw Gruszka 	 */
56642c4ab41SStanislaw Gruszka 	struct cpu_itimer it[2];
5671da177e4SLinus Torvalds 
568f06febc9SFrank Mayhar 	/*
5694cd4c1b4SPeter Zijlstra 	 * Thread group totals for process CPU timers.
5704cd4c1b4SPeter Zijlstra 	 * See thread_group_cputimer(), et al, for details.
571f06febc9SFrank Mayhar 	 */
5724cd4c1b4SPeter Zijlstra 	struct thread_group_cputimer cputimer;
573f06febc9SFrank Mayhar 
574f06febc9SFrank Mayhar 	/* Earliest-expiration cache. */
575f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
576f06febc9SFrank Mayhar 
577f06febc9SFrank Mayhar 	struct list_head cpu_timers[3];
578f06febc9SFrank Mayhar 
579ab521dc0SEric W. Biederman 	struct pid *tty_old_pgrp;
5801ec320afSCedric Le Goater 
5811da177e4SLinus Torvalds 	/* boolean value for session group leader */
5821da177e4SLinus Torvalds 	int leader;
5831da177e4SLinus Torvalds 
5841da177e4SLinus Torvalds 	struct tty_struct *tty; /* NULL if no tty */
5851da177e4SLinus Torvalds 
5865091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
5875091faa4SMike Galbraith 	struct autogroup *autogroup;
5885091faa4SMike Galbraith #endif
5891da177e4SLinus Torvalds 	/*
5901da177e4SLinus Torvalds 	 * Cumulative resource counters for dead threads in the group,
5911da177e4SLinus Torvalds 	 * and for reaped dead child processes forked by this group.
5921da177e4SLinus Torvalds 	 * Live threads maintain their own counters and add to these
5931da177e4SLinus Torvalds 	 * in __exit_signal, except for the group leader.
5941da177e4SLinus Torvalds 	 */
59532bd671dSPeter Zijlstra 	cputime_t utime, stime, cutime, cstime;
5969ac52315SLaurent Vivier 	cputime_t gtime;
5979ac52315SLaurent Vivier 	cputime_t cgtime;
5980cf55e1eSHidetoshi Seto #ifndef CONFIG_VIRT_CPU_ACCOUNTING
599*d37f761dSFrederic Weisbecker 	struct cputime prev_cputime;
6000cf55e1eSHidetoshi Seto #endif
6011da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
6021da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
6036eaeeabaSEric Dumazet 	unsigned long inblock, oublock, cinblock, coublock;
6041f10206cSJiri Pirko 	unsigned long maxrss, cmaxrss;
605940389b8SAndrea Righi 	struct task_io_accounting ioac;
6061da177e4SLinus Torvalds 
6071da177e4SLinus Torvalds 	/*
60832bd671dSPeter Zijlstra 	 * Cumulative ns of scheduled CPU time for dead threads in the
60932bd671dSPeter Zijlstra 	 * group, not including a zombie group leader. (This only differs
61032bd671dSPeter Zijlstra 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
61132bd671dSPeter Zijlstra 	 * other than jiffies.)
61232bd671dSPeter Zijlstra 	 */
61332bd671dSPeter Zijlstra 	unsigned long long sum_sched_runtime;
61432bd671dSPeter Zijlstra 
61532bd671dSPeter Zijlstra 	/*
6161da177e4SLinus Torvalds 	 * We don't bother to synchronize most readers of this at all,
6171da177e4SLinus Torvalds 	 * because there is no reader checking a limit that actually needs
6181da177e4SLinus Torvalds 	 * to get both rlim_cur and rlim_max atomically, and either one
6191da177e4SLinus Torvalds 	 * alone is a single word that can safely be read normally.
6201da177e4SLinus Torvalds 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
6211da177e4SLinus Torvalds 	 * protect this instead of the siglock, because they really
6221da177e4SLinus Torvalds 	 * have no need to disable irqs.
6231da177e4SLinus Torvalds 	 */
6241da177e4SLinus Torvalds 	struct rlimit rlim[RLIM_NLIMITS];
6251da177e4SLinus Torvalds 
6260e464814SKaiGai Kohei #ifdef CONFIG_BSD_PROCESS_ACCT
6270e464814SKaiGai Kohei 	struct pacct_struct pacct;	/* per-process accounting information */
6280e464814SKaiGai Kohei #endif
629ad4ecbcbSShailabh Nagar #ifdef CONFIG_TASKSTATS
630ad4ecbcbSShailabh Nagar 	struct taskstats *stats;
631ad4ecbcbSShailabh Nagar #endif
632522ed776SMiloslav Trmac #ifdef CONFIG_AUDIT
633522ed776SMiloslav Trmac 	unsigned audit_tty;
634522ed776SMiloslav Trmac 	struct tty_audit_buf *tty_audit_buf;
635522ed776SMiloslav Trmac #endif
6364714d1d3SBen Blum #ifdef CONFIG_CGROUPS
6374714d1d3SBen Blum 	/*
63877e4ef99STejun Heo 	 * group_rwsem prevents new tasks from entering the threadgroup and
63977e4ef99STejun Heo 	 * member tasks from exiting, more specifically the setting of
64077e4ef99STejun Heo 	 * PF_EXITING.  fork and exit paths are protected with this rwsem
64177e4ef99STejun Heo 	 * using threadgroup_change_begin/end().  Users which require
64277e4ef99STejun Heo 	 * threadgroup to remain stable should use threadgroup_[un]lock()
64377e4ef99STejun Heo 	 * which also takes care of exec path.  Currently, cgroup is the
64477e4ef99STejun Heo 	 * only user.
6454714d1d3SBen Blum 	 */
646257058aeSTejun Heo 	struct rw_semaphore group_rwsem;
6474714d1d3SBen Blum #endif
64828b83c51SKOSAKI Motohiro 
649a63d83f4SDavid Rientjes 	int oom_score_adj;	/* OOM kill score adjustment */
650dabb16f6SMandeep Singh Baines 	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
651dabb16f6SMandeep Singh Baines 				 * Only settable by CAP_SYS_RESOURCE. */
6529b1bf12dSKOSAKI Motohiro 
6539b1bf12dSKOSAKI Motohiro 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
6549b1bf12dSKOSAKI Motohiro 					 * credential calculations
6559b1bf12dSKOSAKI Motohiro 					 * (notably ptrace) */
6561da177e4SLinus Torvalds };
6571da177e4SLinus Torvalds 
6581da177e4SLinus Torvalds /*
6591da177e4SLinus Torvalds  * Bits in flags field of signal_struct.
6601da177e4SLinus Torvalds  */
6611da177e4SLinus Torvalds #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
662ee77f075SOleg Nesterov #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
663ee77f075SOleg Nesterov #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
664e4420551SOleg Nesterov /*
665e4420551SOleg Nesterov  * Pending notifications to parent.
666e4420551SOleg Nesterov  */
667e4420551SOleg Nesterov #define SIGNAL_CLD_STOPPED	0x00000010
668e4420551SOleg Nesterov #define SIGNAL_CLD_CONTINUED	0x00000020
669e4420551SOleg Nesterov #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
6701da177e4SLinus Torvalds 
671fae5fa44SOleg Nesterov #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
672fae5fa44SOleg Nesterov 
673ed5d2cacSOleg Nesterov /* If true, all threads except ->group_exit_task have pending SIGKILL */
674ed5d2cacSOleg Nesterov static inline int signal_group_exit(const struct signal_struct *sig)
675ed5d2cacSOleg Nesterov {
676ed5d2cacSOleg Nesterov 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
677ed5d2cacSOleg Nesterov 		(sig->group_exit_task != NULL);
678ed5d2cacSOleg Nesterov }
679ed5d2cacSOleg Nesterov 
6801da177e4SLinus Torvalds /*
6811da177e4SLinus Torvalds  * Some day this will be a full-fledged user tracking system..
6821da177e4SLinus Torvalds  */
6831da177e4SLinus Torvalds struct user_struct {
6841da177e4SLinus Torvalds 	atomic_t __count;	/* reference count */
6851da177e4SLinus Torvalds 	atomic_t processes;	/* How many processes does this user have? */
6861da177e4SLinus Torvalds 	atomic_t files;		/* How many open files does this user have? */
6871da177e4SLinus Torvalds 	atomic_t sigpending;	/* How many pending signals does this user have? */
6882d9048e2SAmy Griffis #ifdef CONFIG_INOTIFY_USER
6890eeca283SRobert Love 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
6900eeca283SRobert Love 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
6910eeca283SRobert Love #endif
6924afeff85SEric Paris #ifdef CONFIG_FANOTIFY
6934afeff85SEric Paris 	atomic_t fanotify_listeners;
6944afeff85SEric Paris #endif
6957ef9964eSDavide Libenzi #ifdef CONFIG_EPOLL
69652bd19f7SRobin Holt 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
6977ef9964eSDavide Libenzi #endif
698970a8645SAlexey Dobriyan #ifdef CONFIG_POSIX_MQUEUE
6991da177e4SLinus Torvalds 	/* protected by mq_lock	*/
7001da177e4SLinus Torvalds 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
701970a8645SAlexey Dobriyan #endif
7021da177e4SLinus Torvalds 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
7031da177e4SLinus Torvalds 
7041da177e4SLinus Torvalds #ifdef CONFIG_KEYS
7051da177e4SLinus Torvalds 	struct key *uid_keyring;	/* UID specific keyring */
7061da177e4SLinus Torvalds 	struct key *session_keyring;	/* UID's default session keyring */
7071da177e4SLinus Torvalds #endif
7081da177e4SLinus Torvalds 
7091da177e4SLinus Torvalds 	/* Hash table maintenance information */
710735de223SPavel Emelyanov 	struct hlist_node uidhash_node;
7117b44ab97SEric W. Biederman 	kuid_t uid;
71224e377a8SSrivatsa Vaddagiri 
713cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
714789f90fcSPeter Zijlstra 	atomic_long_t locked_vm;
715789f90fcSPeter Zijlstra #endif
7161da177e4SLinus Torvalds };
7171da177e4SLinus Torvalds 
718eb41d946SKay Sievers extern int uids_sysfs_init(void);
7195cb350baSDhaval Giani 
7207b44ab97SEric W. Biederman extern struct user_struct *find_user(kuid_t);
7211da177e4SLinus Torvalds 
7221da177e4SLinus Torvalds extern struct user_struct root_user;
7231da177e4SLinus Torvalds #define INIT_USER (&root_user)
7241da177e4SLinus Torvalds 
725b6dff3ecSDavid Howells 
7261da177e4SLinus Torvalds struct backing_dev_info;
7271da177e4SLinus Torvalds struct reclaim_state;
7281da177e4SLinus Torvalds 
72952f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
7301da177e4SLinus Torvalds struct sched_info {
7311da177e4SLinus Torvalds 	/* cumulative counters */
7322d72376bSIngo Molnar 	unsigned long pcount;	      /* # of times run on this cpu */
7339c2c4802SKen Chen 	unsigned long long run_delay; /* time spent waiting on a runqueue */
7341da177e4SLinus Torvalds 
7351da177e4SLinus Torvalds 	/* timestamps */
736172ba844SBalbir Singh 	unsigned long long last_arrival,/* when we last ran on a cpu */
7371da177e4SLinus Torvalds 			   last_queued;	/* when we were last queued to run */
7381da177e4SLinus Torvalds };
73952f17b6cSChandra Seetharaman #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
7401da177e4SLinus Torvalds 
741ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT
742ca74e92bSShailabh Nagar struct task_delay_info {
743ca74e92bSShailabh Nagar 	spinlock_t	lock;
744ca74e92bSShailabh Nagar 	unsigned int	flags;	/* Private per-task flags */
745ca74e92bSShailabh Nagar 
746ca74e92bSShailabh Nagar 	/* For each stat XXX, add following, aligned appropriately
747ca74e92bSShailabh Nagar 	 *
748ca74e92bSShailabh Nagar 	 * struct timespec XXX_start, XXX_end;
749ca74e92bSShailabh Nagar 	 * u64 XXX_delay;
750ca74e92bSShailabh Nagar 	 * u32 XXX_count;
751ca74e92bSShailabh Nagar 	 *
752ca74e92bSShailabh Nagar 	 * Atomicity of updates to XXX_delay, XXX_count protected by
753ca74e92bSShailabh Nagar 	 * single lock above (split into XXX_lock if contention is an issue).
754ca74e92bSShailabh Nagar 	 */
7550ff92245SShailabh Nagar 
7560ff92245SShailabh Nagar 	/*
7570ff92245SShailabh Nagar 	 * XXX_count is incremented on every XXX operation, the delay
7580ff92245SShailabh Nagar 	 * associated with the operation is added to XXX_delay.
7590ff92245SShailabh Nagar 	 * XXX_delay contains the accumulated delay time in nanoseconds.
7600ff92245SShailabh Nagar 	 */
7610ff92245SShailabh Nagar 	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
7620ff92245SShailabh Nagar 	u64 blkio_delay;	/* wait for sync block io completion */
7630ff92245SShailabh Nagar 	u64 swapin_delay;	/* wait for swapin block io completion */
7640ff92245SShailabh Nagar 	u32 blkio_count;	/* total count of the number of sync block */
7650ff92245SShailabh Nagar 				/* io operations performed */
7660ff92245SShailabh Nagar 	u32 swapin_count;	/* total count of the number of swapin block */
7670ff92245SShailabh Nagar 				/* io operations performed */
768873b4771SKeika Kobayashi 
769873b4771SKeika Kobayashi 	struct timespec freepages_start, freepages_end;
770873b4771SKeika Kobayashi 	u64 freepages_delay;	/* wait for memory reclaim */
771873b4771SKeika Kobayashi 	u32 freepages_count;	/* total count of memory reclaim */
772ca74e92bSShailabh Nagar };
77352f17b6cSChandra Seetharaman #endif	/* CONFIG_TASK_DELAY_ACCT */
77452f17b6cSChandra Seetharaman 
77552f17b6cSChandra Seetharaman static inline int sched_info_on(void)
77652f17b6cSChandra Seetharaman {
77752f17b6cSChandra Seetharaman #ifdef CONFIG_SCHEDSTATS
77852f17b6cSChandra Seetharaman 	return 1;
77952f17b6cSChandra Seetharaman #elif defined(CONFIG_TASK_DELAY_ACCT)
78052f17b6cSChandra Seetharaman 	extern int delayacct_on;
78152f17b6cSChandra Seetharaman 	return delayacct_on;
78252f17b6cSChandra Seetharaman #else
78352f17b6cSChandra Seetharaman 	return 0;
784ca74e92bSShailabh Nagar #endif
78552f17b6cSChandra Seetharaman }
786ca74e92bSShailabh Nagar 
787d15bcfdbSIngo Molnar enum cpu_idle_type {
788d15bcfdbSIngo Molnar 	CPU_IDLE,
789d15bcfdbSIngo Molnar 	CPU_NOT_IDLE,
790d15bcfdbSIngo Molnar 	CPU_NEWLY_IDLE,
791d15bcfdbSIngo Molnar 	CPU_MAX_IDLE_TYPES
7921da177e4SLinus Torvalds };
7931da177e4SLinus Torvalds 
7941da177e4SLinus Torvalds /*
795c8b28116SNikhil Rao  * Increase resolution of nice-level calculations for 64-bit architectures.
796c8b28116SNikhil Rao  * The extra resolution improves shares distribution and load balancing of
797c8b28116SNikhil Rao  * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
798c8b28116SNikhil Rao  * hierarchies, especially on larger systems. This is not a user-visible change
799c8b28116SNikhil Rao  * and does not change the user-interface for setting shares/weights.
800c8b28116SNikhil Rao  *
801c8b28116SNikhil Rao  * We increase resolution only if we have enough bits to allow this increased
802c8b28116SNikhil Rao  * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
803c8b28116SNikhil Rao  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
804c8b28116SNikhil Rao  * increased costs.
8051da177e4SLinus Torvalds  */
806e4c2fb0dSPeter Zijlstra #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
807c8b28116SNikhil Rao # define SCHED_LOAD_RESOLUTION	10
808c8b28116SNikhil Rao # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
809c8b28116SNikhil Rao # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
810c8b28116SNikhil Rao #else
811c8b28116SNikhil Rao # define SCHED_LOAD_RESOLUTION	0
812c8b28116SNikhil Rao # define scale_load(w)		(w)
813c8b28116SNikhil Rao # define scale_load_down(w)	(w)
814c8b28116SNikhil Rao #endif
8159aa7b369SIngo Molnar 
816c8b28116SNikhil Rao #define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
8179aa7b369SIngo Molnar #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
8189aa7b369SIngo Molnar 
8191399fa78SNikhil Rao /*
8201399fa78SNikhil Rao  * Increase resolution of cpu_power calculations
8211399fa78SNikhil Rao  */
8221399fa78SNikhil Rao #define SCHED_POWER_SHIFT	10
8231399fa78SNikhil Rao #define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
8241da177e4SLinus Torvalds 
8251399fa78SNikhil Rao /*
8261399fa78SNikhil Rao  * sched-domains (multiprocessor balancing) declarations:
8271399fa78SNikhil Rao  */
8282dd73a4fSPeter Williams #ifdef CONFIG_SMP
829b5d978e0SPeter Zijlstra #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
830b5d978e0SPeter Zijlstra #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
831b5d978e0SPeter Zijlstra #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
832b5d978e0SPeter Zijlstra #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
833c88d5910SPeter Zijlstra #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
834b5d978e0SPeter Zijlstra #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
835b5d978e0SPeter Zijlstra #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
836b5d978e0SPeter Zijlstra #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
837b5d978e0SPeter Zijlstra #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
838532cb4c4SMichael Neuling #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
839b5d978e0SPeter Zijlstra #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
840e3589f6cSPeter Zijlstra #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
8415c45bf27SSiddha, Suresh B 
842532cb4c4SMichael Neuling extern int __weak arch_sd_sibiling_asym_packing(void);
843532cb4c4SMichael Neuling 
8449c3f75cbSPeter Zijlstra struct sched_group_power {
845e3589f6cSPeter Zijlstra 	atomic_t ref;
8461da177e4SLinus Torvalds 	/*
8471da177e4SLinus Torvalds 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
84818a3885fSPeter Zijlstra 	 * single CPU.
8491da177e4SLinus Torvalds 	 */
8509c3f75cbSPeter Zijlstra 	unsigned int power, power_orig;
8514ec4412eSVincent Guittot 	unsigned long next_update;
85269e1e811SSuresh Siddha 	/*
85369e1e811SSuresh Siddha 	 * Number of busy cpus in this group.
85469e1e811SSuresh Siddha 	 */
85569e1e811SSuresh Siddha 	atomic_t nr_busy_cpus;
856c1174876SPeter Zijlstra 
857c1174876SPeter Zijlstra 	unsigned long cpumask[0]; /* iteration mask */
8589c3f75cbSPeter Zijlstra };
8599c3f75cbSPeter Zijlstra 
8609c3f75cbSPeter Zijlstra struct sched_group {
8619c3f75cbSPeter Zijlstra 	struct sched_group *next;	/* Must be a circular list */
8629c3f75cbSPeter Zijlstra 	atomic_t ref;
8639c3f75cbSPeter Zijlstra 
864aae6d3ddSSuresh Siddha 	unsigned int group_weight;
8659c3f75cbSPeter Zijlstra 	struct sched_group_power *sgp;
8666c99e9adSRusty Russell 
8674200efd9SIngo Molnar 	/*
8684200efd9SIngo Molnar 	 * The CPUs this group covers.
8694200efd9SIngo Molnar 	 *
8704200efd9SIngo Molnar 	 * NOTE: this field is variable length. (Allocated dynamically
8714200efd9SIngo Molnar 	 * by attaching extra space to the end of the structure,
8724200efd9SIngo Molnar 	 * depending on how many CPUs the kernel has booted up with)
8734200efd9SIngo Molnar 	 */
8744200efd9SIngo Molnar 	unsigned long cpumask[0];
8751da177e4SLinus Torvalds };
8761da177e4SLinus Torvalds 
877758b2cdcSRusty Russell static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
878758b2cdcSRusty Russell {
8796c99e9adSRusty Russell 	return to_cpumask(sg->cpumask);
880758b2cdcSRusty Russell }
881758b2cdcSRusty Russell 
882c1174876SPeter Zijlstra /*
883c1174876SPeter Zijlstra  * cpumask masking which cpus in the group are allowed to iterate up the domain
884c1174876SPeter Zijlstra  * tree.
885c1174876SPeter Zijlstra  */
886c1174876SPeter Zijlstra static inline struct cpumask *sched_group_mask(struct sched_group *sg)
887c1174876SPeter Zijlstra {
888c1174876SPeter Zijlstra 	return to_cpumask(sg->sgp->cpumask);
889c1174876SPeter Zijlstra }
890c1174876SPeter Zijlstra 
891029632fbSPeter Zijlstra /**
892029632fbSPeter Zijlstra  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
893029632fbSPeter Zijlstra  * @group: The group whose first cpu is to be returned.
894029632fbSPeter Zijlstra  */
895029632fbSPeter Zijlstra static inline unsigned int group_first_cpu(struct sched_group *group)
896029632fbSPeter Zijlstra {
897029632fbSPeter Zijlstra 	return cpumask_first(sched_group_cpus(group));
898029632fbSPeter Zijlstra }
899029632fbSPeter Zijlstra 
9001d3504fcSHidetoshi Seto struct sched_domain_attr {
9011d3504fcSHidetoshi Seto 	int relax_domain_level;
9021d3504fcSHidetoshi Seto };
9031d3504fcSHidetoshi Seto 
9041d3504fcSHidetoshi Seto #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
9051d3504fcSHidetoshi Seto 	.relax_domain_level = -1,			\
9061d3504fcSHidetoshi Seto }
9071d3504fcSHidetoshi Seto 
90860495e77SPeter Zijlstra extern int sched_domain_level_max;
90960495e77SPeter Zijlstra 
9101da177e4SLinus Torvalds struct sched_domain {
9111da177e4SLinus Torvalds 	/* These fields must be setup */
9121da177e4SLinus Torvalds 	struct sched_domain *parent;	/* top domain must be null terminated */
9131a848870SSiddha, Suresh B 	struct sched_domain *child;	/* bottom domain must be null terminated */
9141da177e4SLinus Torvalds 	struct sched_group *groups;	/* the balancing groups of the domain */
9151da177e4SLinus Torvalds 	unsigned long min_interval;	/* Minimum balance interval ms */
9161da177e4SLinus Torvalds 	unsigned long max_interval;	/* Maximum balance interval ms */
9171da177e4SLinus Torvalds 	unsigned int busy_factor;	/* less balancing by factor if busy */
9181da177e4SLinus Torvalds 	unsigned int imbalance_pct;	/* No balance until over watermark */
9191da177e4SLinus Torvalds 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
9207897986bSNick Piggin 	unsigned int busy_idx;
9217897986bSNick Piggin 	unsigned int idle_idx;
9227897986bSNick Piggin 	unsigned int newidle_idx;
9237897986bSNick Piggin 	unsigned int wake_idx;
924147cbb4bSNick Piggin 	unsigned int forkexec_idx;
925a52bfd73SPeter Zijlstra 	unsigned int smt_gain;
9261da177e4SLinus Torvalds 	int flags;			/* See SD_* */
92760495e77SPeter Zijlstra 	int level;
9281da177e4SLinus Torvalds 
9291da177e4SLinus Torvalds 	/* Runtime fields. */
9301da177e4SLinus Torvalds 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
9311da177e4SLinus Torvalds 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
9321da177e4SLinus Torvalds 	unsigned int nr_balance_failed; /* initialise to 0 */
9331da177e4SLinus Torvalds 
9342398f2c6SPeter Zijlstra 	u64 last_update;
9352398f2c6SPeter Zijlstra 
9361da177e4SLinus Torvalds #ifdef CONFIG_SCHEDSTATS
9371da177e4SLinus Torvalds 	/* load_balance() stats */
938480b9434SKen Chen 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
939480b9434SKen Chen 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
940480b9434SKen Chen 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
941480b9434SKen Chen 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
942480b9434SKen Chen 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
943480b9434SKen Chen 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
944480b9434SKen Chen 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
945480b9434SKen Chen 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
9461da177e4SLinus Torvalds 
9471da177e4SLinus Torvalds 	/* Active load balancing */
948480b9434SKen Chen 	unsigned int alb_count;
949480b9434SKen Chen 	unsigned int alb_failed;
950480b9434SKen Chen 	unsigned int alb_pushed;
9511da177e4SLinus Torvalds 
95268767a0aSNick Piggin 	/* SD_BALANCE_EXEC stats */
953480b9434SKen Chen 	unsigned int sbe_count;
954480b9434SKen Chen 	unsigned int sbe_balanced;
955480b9434SKen Chen 	unsigned int sbe_pushed;
9561da177e4SLinus Torvalds 
95768767a0aSNick Piggin 	/* SD_BALANCE_FORK stats */
958480b9434SKen Chen 	unsigned int sbf_count;
959480b9434SKen Chen 	unsigned int sbf_balanced;
960480b9434SKen Chen 	unsigned int sbf_pushed;
96168767a0aSNick Piggin 
9621da177e4SLinus Torvalds 	/* try_to_wake_up() stats */
963480b9434SKen Chen 	unsigned int ttwu_wake_remote;
964480b9434SKen Chen 	unsigned int ttwu_move_affine;
965480b9434SKen Chen 	unsigned int ttwu_move_balance;
9661da177e4SLinus Torvalds #endif
967a5d8c348SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
968a5d8c348SIngo Molnar 	char *name;
969a5d8c348SIngo Molnar #endif
970dce840a0SPeter Zijlstra 	union {
971dce840a0SPeter Zijlstra 		void *private;		/* used during construction */
972dce840a0SPeter Zijlstra 		struct rcu_head rcu;	/* used during destruction */
973dce840a0SPeter Zijlstra 	};
9746c99e9adSRusty Russell 
975669c55e9SPeter Zijlstra 	unsigned int span_weight;
9764200efd9SIngo Molnar 	/*
9774200efd9SIngo Molnar 	 * Span of all CPUs in this domain.
9784200efd9SIngo Molnar 	 *
9794200efd9SIngo Molnar 	 * NOTE: this field is variable length. (Allocated dynamically
9804200efd9SIngo Molnar 	 * by attaching extra space to the end of the structure,
9814200efd9SIngo Molnar 	 * depending on how many CPUs the kernel has booted up with)
9824200efd9SIngo Molnar 	 */
9834200efd9SIngo Molnar 	unsigned long span[0];
9841da177e4SLinus Torvalds };
9851da177e4SLinus Torvalds 
986758b2cdcSRusty Russell static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
987758b2cdcSRusty Russell {
9886c99e9adSRusty Russell 	return to_cpumask(sd->span);
989758b2cdcSRusty Russell }
990758b2cdcSRusty Russell 
991acc3f5d7SRusty Russell extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
9921d3504fcSHidetoshi Seto 				    struct sched_domain_attr *dattr_new);
993029190c5SPaul Jackson 
994acc3f5d7SRusty Russell /* Allocate an array of sched domains, for partition_sched_domains(). */
995acc3f5d7SRusty Russell cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
996acc3f5d7SRusty Russell void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
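/*
 * Editorial usage sketch (roughly how cpusets drives a domain rebuild; the
 * mask copied in below is illustrative only):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	cpumask_copy(doms[0], cpu_active_mask);
 *	partition_sched_domains(1, doms, NULL);
 *
 * After the call the scheduler owns @doms, so the caller must not free it.
 */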
997acc3f5d7SRusty Russell 
99806aaf76aSIngo Molnar /* Test a flag in parent sched domain */
99906aaf76aSIngo Molnar static inline int test_sd_parent(struct sched_domain *sd, int flag)
100006aaf76aSIngo Molnar {
100106aaf76aSIngo Molnar 	if (sd->parent && (sd->parent->flags & flag))
100206aaf76aSIngo Molnar 		return 1;
100306aaf76aSIngo Molnar 
100406aaf76aSIngo Molnar 	return 0;
100506aaf76aSIngo Molnar }
10061da177e4SLinus Torvalds 
100747fe38fcSPeter Zijlstra unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
100847fe38fcSPeter Zijlstra unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
100947fe38fcSPeter Zijlstra 
101039be3501SPeter Zijlstra bool cpus_share_cache(int this_cpu, int that_cpu);
101139be3501SPeter Zijlstra 
10121b427c15SIngo Molnar #else /* CONFIG_SMP */
10131da177e4SLinus Torvalds 
10141b427c15SIngo Molnar struct sched_domain_attr;
10151b427c15SIngo Molnar 
10161b427c15SIngo Molnar static inline void
1017acc3f5d7SRusty Russell partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
10181b427c15SIngo Molnar 			struct sched_domain_attr *dattr_new)
1019d02c7a8cSCon Kolivas {
1020d02c7a8cSCon Kolivas }
102139be3501SPeter Zijlstra 
102239be3501SPeter Zijlstra static inline bool cpus_share_cache(int this_cpu, int that_cpu)
102339be3501SPeter Zijlstra {
102439be3501SPeter Zijlstra 	return true;
102539be3501SPeter Zijlstra }
102639be3501SPeter Zijlstra 
10271b427c15SIngo Molnar #endif	/* !CONFIG_SMP */
10281da177e4SLinus Torvalds 
102947fe38fcSPeter Zijlstra 
10301da177e4SLinus Torvalds struct io_context;			/* See blkdev.h */
10311da177e4SLinus Torvalds 
10321da177e4SLinus Torvalds 
1033383f2835SChen, Kenneth W #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
103436c8b586SIngo Molnar extern void prefetch_stack(struct task_struct *t);
1035383f2835SChen, Kenneth W #else
1036383f2835SChen, Kenneth W static inline void prefetch_stack(struct task_struct *t) { }
1037383f2835SChen, Kenneth W #endif
10381da177e4SLinus Torvalds 
10391da177e4SLinus Torvalds struct audit_context;		/* See audit.c */
10401da177e4SLinus Torvalds struct mempolicy;
1041b92ce558SJens Axboe struct pipe_inode_info;
10424865ecf1SSerge E. Hallyn struct uts_namespace;
10431da177e4SLinus Torvalds 
104420b8a59fSIngo Molnar struct rq;
104520b8a59fSIngo Molnar struct sched_domain;
104620b8a59fSIngo Molnar 
10477d478721SPeter Zijlstra /*
10487d478721SPeter Zijlstra  * wake flags
10497d478721SPeter Zijlstra  */
10507d478721SPeter Zijlstra #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */

1051a7558e01SPeter Zijlstra #define WF_FORK		0x02		/* child wakeup after fork */
1052f339b9dcSPeter Zijlstra #define WF_MIGRATED	0x04		/* internal use, task got migrated */
10537d478721SPeter Zijlstra 
1054371fd7e7SPeter Zijlstra #define ENQUEUE_WAKEUP		1
105574f8e4b2SPeter Zijlstra #define ENQUEUE_HEAD		2
105674f8e4b2SPeter Zijlstra #ifdef CONFIG_SMP
105774f8e4b2SPeter Zijlstra #define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
105874f8e4b2SPeter Zijlstra #else
105974f8e4b2SPeter Zijlstra #define ENQUEUE_WAKING		0
106074f8e4b2SPeter Zijlstra #endif
1061371fd7e7SPeter Zijlstra 
1062371fd7e7SPeter Zijlstra #define DEQUEUE_SLEEP		1
1063371fd7e7SPeter Zijlstra 
106420b8a59fSIngo Molnar struct sched_class {
10655522d5d5SIngo Molnar 	const struct sched_class *next;
106620b8a59fSIngo Molnar 
1067371fd7e7SPeter Zijlstra 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1068371fd7e7SPeter Zijlstra 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
10694530d7abSDmitry Adamushko 	void (*yield_task) (struct rq *rq);
1070d95f4122SMike Galbraith 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
107120b8a59fSIngo Molnar 
10727d478721SPeter Zijlstra 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
107320b8a59fSIngo Molnar 
1074fb8d4724SIngo Molnar 	struct task_struct * (*pick_next_task) (struct rq *rq);
107531ee529cSIngo Molnar 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
107620b8a59fSIngo Molnar 
1077681f3e68SPeter Williams #ifdef CONFIG_SMP
10787608dec2SPeter Zijlstra 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
10790a74bef8SPaul Turner 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
10804ce72a2cSLi Zefan 
10819a897c5aSSteven Rostedt 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
10829a897c5aSSteven Rostedt 	void (*post_schedule) (struct rq *this_rq);
108374f8e4b2SPeter Zijlstra 	void (*task_waking) (struct task_struct *task);
1084efbbd05aSPeter Zijlstra 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1085e1d1484fSPeter Williams 
1086cd8ba7cdSMike Travis 	void (*set_cpus_allowed)(struct task_struct *p,
108796f874e2SRusty Russell 				 const struct cpumask *newmask);
108857d885feSGregory Haskins 
10891f11eb6aSGregory Haskins 	void (*rq_online)(struct rq *rq);
10901f11eb6aSGregory Haskins 	void (*rq_offline)(struct rq *rq);
10914ce72a2cSLi Zefan #endif
10924ce72a2cSLi Zefan 
10934ce72a2cSLi Zefan 	void (*set_curr_task) (struct rq *rq);
10944ce72a2cSLi Zefan 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1095cd29fe6fSPeter Zijlstra 	void (*task_fork) (struct task_struct *p);
1096cb469845SSteven Rostedt 
1097da7a735eSPeter Zijlstra 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1098da7a735eSPeter Zijlstra 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1099cb469845SSteven Rostedt 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1100da7a735eSPeter Zijlstra 			     int oldprio);
1101810b3817SPeter Zijlstra 
1102dba091b9SThomas Gleixner 	unsigned int (*get_rr_interval) (struct rq *rq,
1103dba091b9SThomas Gleixner 					 struct task_struct *task);
11040d721ceaSPeter Williams 
1105810b3817SPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
1106b2b5ce02SPeter Zijlstra 	void (*task_move_group) (struct task_struct *p, int on_rq);
1107810b3817SPeter Zijlstra #endif
110820b8a59fSIngo Molnar };
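/*
 * Editorial note: the scheduler core walks these classes in priority order
 * via ->next (in this kernel: stop, rt, fair, idle), roughly like the sketch
 * below; sched_class_highest is a scheduler-internal name shown only for
 * illustration.
 *
 *	for (class = sched_class_highest; class; class = class->next) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			break;
 *	}
 */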
110920b8a59fSIngo Molnar 
111020b8a59fSIngo Molnar struct load_weight {
111120b8a59fSIngo Molnar 	unsigned long weight, inv_weight;
111220b8a59fSIngo Molnar };
111320b8a59fSIngo Molnar 
11149d85f21cSPaul Turner struct sched_avg {
11159d85f21cSPaul Turner 	/*
11169d85f21cSPaul Turner 	 * These sums represent an infinite geometric series and so are bounded
11179d85f21cSPaul Turner 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
11189d85f21cSPaul Turner 	 * choices of y < 1-2^(-32)*1024.  (A worked example follows below.)
11199d85f21cSPaul Turner 	 */
11209d85f21cSPaul Turner 	u32 runnable_avg_sum, runnable_avg_period;
11219d85f21cSPaul Turner 	u64 last_runnable_update;
11229ee474f5SPaul Turner 	s64 decay_count;
11232dac754eSPaul Turner 	unsigned long load_avg_contrib;
11249d85f21cSPaul Turner };
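/*
 * Worked example for the bound above (editorial; assumes the decay factor y
 * is chosen so that y^32 == 1/2, as in the per-entity load-tracking code):
 * each period contributes at most 1024, so
 *
 *	sum <= 1024 * (1 + y + y^2 + ...) = 1024 / (1 - y)
 *	    ~= 1024 / (1 - 0.97857) ~= 47.8k
 *
 * which fits comfortably in a u32.
 */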
11259d85f21cSPaul Turner 
112694c18227SIngo Molnar #ifdef CONFIG_SCHEDSTATS
112741acab88SLucas De Marchi struct sched_statistics {
112894c18227SIngo Molnar 	u64			wait_start;
112994c18227SIngo Molnar 	u64			wait_max;
11306d082592SArjan van de Ven 	u64			wait_count;
11316d082592SArjan van de Ven 	u64			wait_sum;
11328f0dfc34SArjan van de Ven 	u64			iowait_count;
11338f0dfc34SArjan van de Ven 	u64			iowait_sum;
113494c18227SIngo Molnar 
113594c18227SIngo Molnar 	u64			sleep_start;
113620b8a59fSIngo Molnar 	u64			sleep_max;
113794c18227SIngo Molnar 	s64			sum_sleep_runtime;
113894c18227SIngo Molnar 
113994c18227SIngo Molnar 	u64			block_start;
114020b8a59fSIngo Molnar 	u64			block_max;
114120b8a59fSIngo Molnar 	u64			exec_max;
1142eba1ed4bSIngo Molnar 	u64			slice_max;
1143cc367732SIngo Molnar 
1144cc367732SIngo Molnar 	u64			nr_migrations_cold;
1145cc367732SIngo Molnar 	u64			nr_failed_migrations_affine;
1146cc367732SIngo Molnar 	u64			nr_failed_migrations_running;
1147cc367732SIngo Molnar 	u64			nr_failed_migrations_hot;
1148cc367732SIngo Molnar 	u64			nr_forced_migrations;
1149cc367732SIngo Molnar 
1150cc367732SIngo Molnar 	u64			nr_wakeups;
1151cc367732SIngo Molnar 	u64			nr_wakeups_sync;
1152cc367732SIngo Molnar 	u64			nr_wakeups_migrate;
1153cc367732SIngo Molnar 	u64			nr_wakeups_local;
1154cc367732SIngo Molnar 	u64			nr_wakeups_remote;
1155cc367732SIngo Molnar 	u64			nr_wakeups_affine;
1156cc367732SIngo Molnar 	u64			nr_wakeups_affine_attempts;
1157cc367732SIngo Molnar 	u64			nr_wakeups_passive;
1158cc367732SIngo Molnar 	u64			nr_wakeups_idle;
115941acab88SLucas De Marchi };
116041acab88SLucas De Marchi #endif
116141acab88SLucas De Marchi 
116241acab88SLucas De Marchi struct sched_entity {
116341acab88SLucas De Marchi 	struct load_weight	load;		/* for load-balancing */
116441acab88SLucas De Marchi 	struct rb_node		run_node;
116541acab88SLucas De Marchi 	struct list_head	group_node;
116641acab88SLucas De Marchi 	unsigned int		on_rq;
116741acab88SLucas De Marchi 
116841acab88SLucas De Marchi 	u64			exec_start;
116941acab88SLucas De Marchi 	u64			sum_exec_runtime;
117041acab88SLucas De Marchi 	u64			vruntime;
117141acab88SLucas De Marchi 	u64			prev_sum_exec_runtime;
117241acab88SLucas De Marchi 
117341acab88SLucas De Marchi 	u64			nr_migrations;
117441acab88SLucas De Marchi 
117541acab88SLucas De Marchi #ifdef CONFIG_SCHEDSTATS
117641acab88SLucas De Marchi 	struct sched_statistics statistics;
117794c18227SIngo Molnar #endif
117894c18227SIngo Molnar 
117920b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
118020b8a59fSIngo Molnar 	struct sched_entity	*parent;
118120b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
118220b8a59fSIngo Molnar 	struct cfs_rq		*cfs_rq;
118320b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
118420b8a59fSIngo Molnar 	struct cfs_rq		*my_q;
118520b8a59fSIngo Molnar #endif
1186f4e26b12SPaul Turner /*
1187f4e26b12SPaul Turner  * Load-tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below
1188f4e26b12SPaul Turner  * may be removed once load tracking is useful to consumers beyond shares
1189f4e26b12SPaul Turner  * distribution (e.g. load balancing).
1190f4e26b12SPaul Turner  */
1191f4e26b12SPaul Turner #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
1192f4e26b12SPaul Turner 	/* Per-entity load-tracking */
11939d85f21cSPaul Turner 	struct sched_avg	avg;
11949d85f21cSPaul Turner #endif
119520b8a59fSIngo Molnar };
119670b97a7fSIngo Molnar 
1197fa717060SPeter Zijlstra struct sched_rt_entity {
1198fa717060SPeter Zijlstra 	struct list_head run_list;
119978f2c7dbSPeter Zijlstra 	unsigned long timeout;
1200bee367edSRichard Kennedy 	unsigned int time_slice;
12016f505b16SPeter Zijlstra 
120258d6c2d7SPeter Zijlstra 	struct sched_rt_entity *back;
1203052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
12046f505b16SPeter Zijlstra 	struct sched_rt_entity	*parent;
12056f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
12066f505b16SPeter Zijlstra 	struct rt_rq		*rt_rq;
12076f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
12086f505b16SPeter Zijlstra 	struct rt_rq		*my_q;
12096f505b16SPeter Zijlstra #endif
1210fa717060SPeter Zijlstra };
1211fa717060SPeter Zijlstra 
1212de5bdff7SHiroshi Shimamoto /*
1213de5bdff7SHiroshi Shimamoto  * default timeslice is 100 msecs (used only for SCHED_RR tasks).
1214de5bdff7SHiroshi Shimamoto  * Timeslices get refilled after they expire.
1215de5bdff7SHiroshi Shimamoto  */
1216de5bdff7SHiroshi Shimamoto #define RR_TIMESLICE		(100 * HZ / 1000)
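/*
 * Editorial example: RR_TIMESLICE converts 100 ms into jiffies, so with
 * HZ == 1000 it evaluates to 100 jiffies and with HZ == 250 to 25 jiffies.
 */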
1217de5bdff7SHiroshi Shimamoto 
121886848966SPaul E. McKenney struct rcu_node;
121986848966SPaul E. McKenney 
12208dc85d54SPeter Zijlstra enum perf_event_task_context {
12218dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
12228dc85d54SPeter Zijlstra 	perf_hw_context = 0,
122389a1e187SPeter Zijlstra 	perf_sw_context,
12248dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
12258dc85d54SPeter Zijlstra };
12268dc85d54SPeter Zijlstra 
12271da177e4SLinus Torvalds struct task_struct {
12281da177e4SLinus Torvalds 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1229f7e4217bSRoman Zippel 	void *stack;
12301da177e4SLinus Torvalds 	atomic_t usage;
123197dc32cdSWilliam Cohen 	unsigned int flags;	/* per process flags, defined below */
123297dc32cdSWilliam Cohen 	unsigned int ptrace;
12331da177e4SLinus Torvalds 
12342dd73a4fSPeter Williams #ifdef CONFIG_SMP
1235fa14ff4aSPeter Zijlstra 	struct llist_node wake_entry;
12363ca7a440SPeter Zijlstra 	int on_cpu;
12374866cde0SNick Piggin #endif
1238fd2f4419SPeter Zijlstra 	int on_rq;
123950e645a8SIngo Molnar 
1240b29739f9SIngo Molnar 	int prio, static_prio, normal_prio;
1241c7aceabaSRichard Kennedy 	unsigned int rt_priority;
12425522d5d5SIngo Molnar 	const struct sched_class *sched_class;
124320b8a59fSIngo Molnar 	struct sched_entity se;
1244fa717060SPeter Zijlstra 	struct sched_rt_entity rt;
12458323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
12468323f26cSPeter Zijlstra 	struct task_group *sched_task_group;
12478323f26cSPeter Zijlstra #endif
12481da177e4SLinus Torvalds 
1249e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
1250e107be36SAvi Kivity 	/* list of struct preempt_notifier: */
1251e107be36SAvi Kivity 	struct hlist_head preempt_notifiers;
1252e107be36SAvi Kivity #endif
1253e107be36SAvi Kivity 
125418796aa0SAlexey Dobriyan 	/*
125518796aa0SAlexey Dobriyan 	 * fpu_counter contains the number of consecutive context switches
125618796aa0SAlexey Dobriyan 	 * during which the FPU is used. If this is over a threshold, the lazy FPU
125718796aa0SAlexey Dobriyan 	 * saving becomes unlazy to save the trap. This is an unsigned char
125818796aa0SAlexey Dobriyan 	 * so that after 256 times the counter wraps and the behavior turns
125918796aa0SAlexey Dobriyan 	 * lazy again; this is to deal with bursty apps that only use the FPU for
126018796aa0SAlexey Dobriyan 	 * a short time.
126118796aa0SAlexey Dobriyan 	 */
126218796aa0SAlexey Dobriyan 	unsigned char fpu_counter;
12636c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
12642056a782SJens Axboe 	unsigned int btrace_seq;
12656c5c9341SAlexey Dobriyan #endif
12661da177e4SLinus Torvalds 
126797dc32cdSWilliam Cohen 	unsigned int policy;
126829baa747SPeter Zijlstra 	int nr_cpus_allowed;
12691da177e4SLinus Torvalds 	cpumask_t cpus_allowed;
12701da177e4SLinus Torvalds 
1271a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1272e260be67SPaul E. McKenney 	int rcu_read_lock_nesting;
1273f41d911fSPaul E. McKenney 	char rcu_read_unlock_special;
1274f41d911fSPaul E. McKenney 	struct list_head rcu_node_entry;
1275a57eb940SPaul E. McKenney #endif /* #ifdef CONFIG_PREEMPT_RCU */
1276a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1277a57eb940SPaul E. McKenney 	struct rcu_node *rcu_blocked_node;
1278f41d911fSPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
127924278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
128024278d14SPaul E. McKenney 	struct rt_mutex *rcu_boost_mutex;
128124278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1282e260be67SPaul E. McKenney 
128352f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
12841da177e4SLinus Torvalds 	struct sched_info sched_info;
12851da177e4SLinus Torvalds #endif
12861da177e4SLinus Torvalds 
12871da177e4SLinus Torvalds 	struct list_head tasks;
1288806c09a7SDario Faggioli #ifdef CONFIG_SMP
1289917b627dSGregory Haskins 	struct plist_node pushable_tasks;
1290806c09a7SDario Faggioli #endif
12911da177e4SLinus Torvalds 
12921da177e4SLinus Torvalds 	struct mm_struct *mm, *active_mm;
12934471a675SJiri Kosina #ifdef CONFIG_COMPAT_BRK
12944471a675SJiri Kosina 	unsigned brk_randomized:1;
12954471a675SJiri Kosina #endif
129634e55232SKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
129734e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat	rss_stat;
129834e55232SKAMEZAWA Hiroyuki #endif
12991da177e4SLinus Torvalds /* task state */
130097dc32cdSWilliam Cohen 	int exit_state;
13011da177e4SLinus Torvalds 	int exit_code, exit_signal;
13021da177e4SLinus Torvalds 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1303a8f072c1STejun Heo 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
13041da177e4SLinus Torvalds 	/* ??? */
130597dc32cdSWilliam Cohen 	unsigned int personality;
13061da177e4SLinus Torvalds 	unsigned did_exec:1;
1307f9ce1f1cSKentaro Takeda 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1308f9ce1f1cSKentaro Takeda 				 * execve */
13098f0dfc34SArjan van de Ven 	unsigned in_iowait:1;
13108f0dfc34SArjan van de Ven 
1311259e5e6cSAndy Lutomirski 	/* task may not gain privileges */
1312259e5e6cSAndy Lutomirski 	unsigned no_new_privs:1;
1313ca94c442SLennart Poettering 
1314ca94c442SLennart Poettering 	/* Revert to default priority/policy when forking */
1315ca94c442SLennart Poettering 	unsigned sched_reset_on_fork:1;
1316a8e4f2eaSPeter Zijlstra 	unsigned sched_contributes_to_load:1;
1317ca94c442SLennart Poettering 
13181da177e4SLinus Torvalds 	pid_t pid;
13191da177e4SLinus Torvalds 	pid_t tgid;
13200a425405SArjan van de Ven 
13211314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
13220a425405SArjan van de Ven 	/* Canary value for the -fstack-protector gcc feature */
13230a425405SArjan van de Ven 	unsigned long stack_canary;
13241314562aSHiroshi Shimamoto #endif
13251da177e4SLinus Torvalds 	/*
13261da177e4SLinus Torvalds 	 * pointers to (original) parent process, youngest child, younger sibling,
13271da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
1328f470021aSRoland McGrath 	 * p->real_parent->pid)
13291da177e4SLinus Torvalds 	 */
1330abd63bc3SKees Cook 	struct task_struct __rcu *real_parent; /* real parent process */
1331abd63bc3SKees Cook 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
13321da177e4SLinus Torvalds 	/*
1333f470021aSRoland McGrath 	 * children/sibling forms the list of my natural children
13341da177e4SLinus Torvalds 	 */
13351da177e4SLinus Torvalds 	struct list_head children;	/* list of my children */
13361da177e4SLinus Torvalds 	struct list_head sibling;	/* linkage in my parent's children list */
13371da177e4SLinus Torvalds 	struct task_struct *group_leader;	/* threadgroup leader */
13381da177e4SLinus Torvalds 
1339f470021aSRoland McGrath 	/*
1340f470021aSRoland McGrath 	 * ptraced is the list of tasks this task is using ptrace on.
1341f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
1342f470021aSRoland McGrath 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1343f470021aSRoland McGrath 	 */
1344f470021aSRoland McGrath 	struct list_head ptraced;
1345f470021aSRoland McGrath 	struct list_head ptrace_entry;
1346f470021aSRoland McGrath 
13471da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
134892476d7fSEric W. Biederman 	struct pid_link pids[PIDTYPE_MAX];
134947e65328SOleg Nesterov 	struct list_head thread_group;
13501da177e4SLinus Torvalds 
13511da177e4SLinus Torvalds 	struct completion *vfork_done;		/* for vfork() */
13521da177e4SLinus Torvalds 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
13531da177e4SLinus Torvalds 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
13541da177e4SLinus Torvalds 
1355c66f08beSMichael Neuling 	cputime_t utime, stime, utimescaled, stimescaled;
13569ac52315SLaurent Vivier 	cputime_t gtime;
1357d99ca3b9SHidetoshi Seto #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1358*d37f761dSFrederic Weisbecker 	struct cputime prev_cputime;
1359d99ca3b9SHidetoshi Seto #endif
13601da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw; /* context switch counts */
1361924b42d5STomas Janousek 	struct timespec start_time; 		/* monotonic time */
1362924b42d5STomas Janousek 	struct timespec real_start_time;	/* boot based time */
13631da177e4SLinus Torvalds /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
13641da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt;
13651da177e4SLinus Torvalds 
1366f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
13671da177e4SLinus Torvalds 	struct list_head cpu_timers[3];
13681da177e4SLinus Torvalds 
13691da177e4SLinus Torvalds /* process credentials */
13701b0ba1c9SArnd Bergmann 	const struct cred __rcu *real_cred; /* objective and real subjective task
13713b11a1deSDavid Howells 					 * credentials (COW) */
13721b0ba1c9SArnd Bergmann 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
13733b11a1deSDavid Howells 					 * credentials (COW) */
137436772092SPaolo 'Blaisorblade' Giarrusso 	char comm[TASK_COMM_LEN]; /* executable name excluding path
137536772092SPaolo 'Blaisorblade' Giarrusso 				     - access with [gs]et_task_comm (which lock
137636772092SPaolo 'Blaisorblade' Giarrusso 				       it with task_lock())
1377221af7f8SLinus Torvalds 				     - initialized normally by setup_new_exec */
13781da177e4SLinus Torvalds /* file system info */
13791da177e4SLinus Torvalds 	int link_count, total_link_count;
13803d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
13811da177e4SLinus Torvalds /* ipc stuff */
13821da177e4SLinus Torvalds 	struct sysv_sem sysvsem;
13833d5b6fccSAlexey Dobriyan #endif
1384e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
138582a1fcb9SIngo Molnar /* hung task detection */
138682a1fcb9SIngo Molnar 	unsigned long last_switch_count;
138782a1fcb9SIngo Molnar #endif
13881da177e4SLinus Torvalds /* CPU-specific state of this task */
13891da177e4SLinus Torvalds 	struct thread_struct thread;
13901da177e4SLinus Torvalds /* filesystem information */
13911da177e4SLinus Torvalds 	struct fs_struct *fs;
13921da177e4SLinus Torvalds /* open file information */
13931da177e4SLinus Torvalds 	struct files_struct *files;
13941651e14eSSerge E. Hallyn /* namespaces */
1395ab516013SSerge E. Hallyn 	struct nsproxy *nsproxy;
13961da177e4SLinus Torvalds /* signal handlers */
13971da177e4SLinus Torvalds 	struct signal_struct *signal;
13981da177e4SLinus Torvalds 	struct sighand_struct *sighand;
13991da177e4SLinus Torvalds 
14001da177e4SLinus Torvalds 	sigset_t blocked, real_blocked;
1401f3de272bSRoland McGrath 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
14021da177e4SLinus Torvalds 	struct sigpending pending;
14031da177e4SLinus Torvalds 
14041da177e4SLinus Torvalds 	unsigned long sas_ss_sp;
14051da177e4SLinus Torvalds 	size_t sas_ss_size;
14061da177e4SLinus Torvalds 	int (*notifier)(void *priv);
14071da177e4SLinus Torvalds 	void *notifier_data;
14081da177e4SLinus Torvalds 	sigset_t *notifier_mask;
140967d12145SAl Viro 	struct callback_head *task_works;
1410e73f8959SOleg Nesterov 
14111da177e4SLinus Torvalds 	struct audit_context *audit_context;
1412bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
1413e1760bd5SEric W. Biederman 	kuid_t loginuid;
14144746ec5bSEric Paris 	unsigned int sessionid;
1415bfef93a5SAl Viro #endif
1416932ecebbSWill Drewry 	struct seccomp seccomp;
14171da177e4SLinus Torvalds 
14181da177e4SLinus Torvalds /* Thread group tracking */
14191da177e4SLinus Torvalds    	u32 parent_exec_id;
14201da177e4SLinus Torvalds    	u32 self_exec_id;
142158568d2aSMiao Xie /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
142258568d2aSMiao Xie  * mempolicy */
14231da177e4SLinus Torvalds 	spinlock_t alloc_lock;
14241da177e4SLinus Torvalds 
1425b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
14261d615482SThomas Gleixner 	raw_spinlock_t pi_lock;
1427b29739f9SIngo Molnar 
142823f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
142923f78d4aSIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task */
143023f78d4aSIngo Molnar 	struct plist_head pi_waiters;
143123f78d4aSIngo Molnar 	/* Deadlock detection and priority inheritance handling */
143223f78d4aSIngo Molnar 	struct rt_mutex_waiter *pi_blocked_on;
143323f78d4aSIngo Molnar #endif
143423f78d4aSIngo Molnar 
1435408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
1436408894eeSIngo Molnar 	/* mutex deadlock detection */
1437408894eeSIngo Molnar 	struct mutex_waiter *blocked_on;
1438408894eeSIngo Molnar #endif
1439de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
1440de30a2b3SIngo Molnar 	unsigned int irq_events;
1441de30a2b3SIngo Molnar 	unsigned long hardirq_enable_ip;
1442de30a2b3SIngo Molnar 	unsigned long hardirq_disable_ip;
1443fa1452e8SHiroshi Shimamoto 	unsigned int hardirq_enable_event;
1444de30a2b3SIngo Molnar 	unsigned int hardirq_disable_event;
1445fa1452e8SHiroshi Shimamoto 	int hardirqs_enabled;
1446de30a2b3SIngo Molnar 	int hardirq_context;
1447fa1452e8SHiroshi Shimamoto 	unsigned long softirq_disable_ip;
1448fa1452e8SHiroshi Shimamoto 	unsigned long softirq_enable_ip;
1449fa1452e8SHiroshi Shimamoto 	unsigned int softirq_disable_event;
1450fa1452e8SHiroshi Shimamoto 	unsigned int softirq_enable_event;
1451fa1452e8SHiroshi Shimamoto 	int softirqs_enabled;
1452de30a2b3SIngo Molnar 	int softirq_context;
1453de30a2b3SIngo Molnar #endif
1454fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
1455bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
1456fbb9ce95SIngo Molnar 	u64 curr_chain_key;
1457fbb9ce95SIngo Molnar 	int lockdep_depth;
1458fbb9ce95SIngo Molnar 	unsigned int lockdep_recursion;
1459c7aceabaSRichard Kennedy 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1460cf40bd16SNick Piggin 	gfp_t lockdep_reclaim_gfp;
1461fbb9ce95SIngo Molnar #endif
1462408894eeSIngo Molnar 
14631da177e4SLinus Torvalds /* journalling filesystem info */
14641da177e4SLinus Torvalds 	void *journal_info;
14651da177e4SLinus Torvalds 
1466d89d8796SNeil Brown /* stacked block device info */
1467bddd87c7SAkinobu Mita 	struct bio_list *bio_list;
1468d89d8796SNeil Brown 
146973c10101SJens Axboe #ifdef CONFIG_BLOCK
147073c10101SJens Axboe /* stack plugging */
147173c10101SJens Axboe 	struct blk_plug *plug;
147273c10101SJens Axboe #endif
147373c10101SJens Axboe 
14741da177e4SLinus Torvalds /* VM state */
14751da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state;
14761da177e4SLinus Torvalds 
14771da177e4SLinus Torvalds 	struct backing_dev_info *backing_dev_info;
14781da177e4SLinus Torvalds 
14791da177e4SLinus Torvalds 	struct io_context *io_context;
14801da177e4SLinus Torvalds 
14811da177e4SLinus Torvalds 	unsigned long ptrace_message;
14821da177e4SLinus Torvalds 	siginfo_t *last_siginfo; /* For ptrace use.  */
14837c3ab738SAndrew Morton 	struct task_io_accounting ioac;
14848f0ab514SJay Lan #if defined(CONFIG_TASK_XACCT)
14851da177e4SLinus Torvalds 	u64 acct_rss_mem1;	/* accumulated rss usage */
14861da177e4SLinus Torvalds 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
148749b5cf34SJonathan Lim 	cputime_t acct_timexpd;	/* stime + utime since last update */
14881da177e4SLinus Torvalds #endif
14891da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
149058568d2aSMiao Xie 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1491cc9a6c87SMel Gorman 	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
1492825a46afSPaul Jackson 	int cpuset_mem_spread_rotor;
14936adef3ebSJack Steiner 	int cpuset_slab_spread_rotor;
14941da177e4SLinus Torvalds #endif
1495ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
1496817929ecSPaul Menage 	/* Control Group info protected by css_set_lock */
14972c392b8cSArnd Bergmann 	struct css_set __rcu *cgroups;
1498817929ecSPaul Menage 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1499817929ecSPaul Menage 	struct list_head cg_list;
1500ddbcc7e8SPaul Menage #endif
150142b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
15020771dfefSIngo Molnar 	struct robust_list_head __user *robust_list;
150334f192c6SIngo Molnar #ifdef CONFIG_COMPAT
150434f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
150534f192c6SIngo Molnar #endif
1506c87e2837SIngo Molnar 	struct list_head pi_state_list;
1507c87e2837SIngo Molnar 	struct futex_pi_state *pi_state_cache;
150842b2dd0aSAlexey Dobriyan #endif
1509cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
15108dc85d54SPeter Zijlstra 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1511cdd6c482SIngo Molnar 	struct mutex perf_event_mutex;
1512cdd6c482SIngo Molnar 	struct list_head perf_event_list;
1513a63eaf34SPaul Mackerras #endif
1514c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
151558568d2aSMiao Xie 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1516c7aceabaSRichard Kennedy 	short il_next;
1517207205a2SEric Dumazet 	short pref_node_fork;
1518c7aceabaSRichard Kennedy #endif
1519e56d0903SIngo Molnar 	struct rcu_head rcu;
1520b92ce558SJens Axboe 
1521b92ce558SJens Axboe 	/*
1522b92ce558SJens Axboe 	 * cache last used pipe for splice
1523b92ce558SJens Axboe 	 */
1524b92ce558SJens Axboe 	struct pipe_inode_info *splice_pipe;
15255640f768SEric Dumazet 
15265640f768SEric Dumazet 	struct page_frag task_frag;
15275640f768SEric Dumazet 
1528ca74e92bSShailabh Nagar #ifdef	CONFIG_TASK_DELAY_ACCT
1529ca74e92bSShailabh Nagar 	struct task_delay_info *delays;
1530ca74e92bSShailabh Nagar #endif
1531f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1532f4f154fdSAkinobu Mita 	int make_it_fail;
1533f4f154fdSAkinobu Mita #endif
15349d823e8fSWu Fengguang 	/*
15359d823e8fSWu Fengguang 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
15369d823e8fSWu Fengguang 	 * balance_dirty_pages() for some dirty throttling pause
15379d823e8fSWu Fengguang 	 */
15389d823e8fSWu Fengguang 	int nr_dirtied;
15399d823e8fSWu Fengguang 	int nr_dirtied_pause;
154083712358SWu Fengguang 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
15419d823e8fSWu Fengguang 
15429745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
15439745512cSArjan van de Ven 	int latency_record_count;
15449745512cSArjan van de Ven 	struct latency_record latency_record[LT_SAVECOUNT];
15459745512cSArjan van de Ven #endif
15466976675dSArjan van de Ven 	/*
15476976675dSArjan van de Ven 	 * time slack values; these are used to round up poll() and
15486976675dSArjan van de Ven 	 * select() etc timeout values. These are in nanoseconds.
15496976675dSArjan van de Ven 	 */
15506976675dSArjan van de Ven 	unsigned long timer_slack_ns;
15516976675dSArjan van de Ven 	unsigned long default_timer_slack_ns;
1552f8d570a4SDavid Miller 
1553fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
15543ad2f3fbSDaniel Mack 	/* Index of current stored address in ret_stack */
1555f201ae23SFrederic Weisbecker 	int curr_ret_stack;
1556f201ae23SFrederic Weisbecker 	/* Stack of return addresses for return function tracing */
1557f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack	*ret_stack;
15588aef2d28SSteven Rostedt 	/* time stamp for last schedule */
15598aef2d28SSteven Rostedt 	unsigned long long ftrace_timestamp;
1560f201ae23SFrederic Weisbecker 	/*
1561f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
1562f201ae23SFrederic Weisbecker 	 * because of depth overrun.
1563f201ae23SFrederic Weisbecker 	 */
1564f201ae23SFrederic Weisbecker 	atomic_t trace_overrun;
1565380c4b14SFrederic Weisbecker 	/* Pause for the tracing */
1566380c4b14SFrederic Weisbecker 	atomic_t tracing_graph_pause;
1567f201ae23SFrederic Weisbecker #endif
1568ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1569ea4e2bc4SSteven Rostedt 	/* state flags for use by tracers */
1570ea4e2bc4SSteven Rostedt 	unsigned long trace;
1571b1cff0adSSteven Rostedt 	/* bitmask and counter of trace recursion */
1572261842b7SSteven Rostedt 	unsigned long trace_recursion;
1573261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
1574c255a458SAndrew Morton #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
1575569b846dSKAMEZAWA Hiroyuki 	struct memcg_batch_info {
1576569b846dSKAMEZAWA Hiroyuki 		int do_batch;	/* incremented when batch uncharge started */
1577569b846dSKAMEZAWA Hiroyuki 		struct mem_cgroup *memcg; /* target memcg of uncharge */
15787ffd4ca7SJohannes Weiner 		unsigned long nr_pages;	/* uncharged usage */
15797ffd4ca7SJohannes Weiner 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1580569b846dSKAMEZAWA Hiroyuki 	} memcg_batch;
1581569b846dSKAMEZAWA Hiroyuki #endif
1582bf26c018SFrederic Weisbecker #ifdef CONFIG_HAVE_HW_BREAKPOINT
1583bf26c018SFrederic Weisbecker 	atomic_t ptrace_bp_refcnt;
1584bf26c018SFrederic Weisbecker #endif
15850326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
15860326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
15870326f5a9SSrikar Dronamraju #endif
15881da177e4SLinus Torvalds };
15891da177e4SLinus Torvalds 
159076e6eee0SRusty Russell /* Future-safe accessor for struct task_struct's cpus_allowed. */
1591a4636818SRusty Russell #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
159276e6eee0SRusty Russell 
1593e05606d3SIngo Molnar /*
1594e05606d3SIngo Molnar  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1595e05606d3SIngo Molnar  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1596e05606d3SIngo Molnar  * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1597e05606d3SIngo Molnar  * values are inverted: lower p->prio value means higher priority.
1598e05606d3SIngo Molnar  *
1599e05606d3SIngo Molnar  * The MAX_USER_RT_PRIO value allows the actual maximum
1600e05606d3SIngo Molnar  * RT priority to be separate from the value exported to
1601e05606d3SIngo Molnar  * user-space.  This allows kernel threads to set their
1602e05606d3SIngo Molnar  * priority to a value higher than any user task. Note:
1603e05606d3SIngo Molnar  * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1604e05606d3SIngo Molnar  */
1605e05606d3SIngo Molnar 
1606e05606d3SIngo Molnar #define MAX_USER_RT_PRIO	100
1607e05606d3SIngo Molnar #define MAX_RT_PRIO		MAX_USER_RT_PRIO
1608e05606d3SIngo Molnar 
1609e05606d3SIngo Molnar #define MAX_PRIO		(MAX_RT_PRIO + 40)
1610e05606d3SIngo Molnar #define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
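/*
 * Editorial example of the resulting layout: prio 0..99 are RT priorities,
 * prio 100..139 belong to SCHED_NORMAL/SCHED_BATCH tasks, and DEFAULT_PRIO
 * (120) corresponds to nice 0, so nice -20..19 maps onto prio 100..139.
 */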
1611e05606d3SIngo Molnar 
1612e05606d3SIngo Molnar static inline int rt_prio(int prio)
1613e05606d3SIngo Molnar {
1614e05606d3SIngo Molnar 	if (unlikely(prio < MAX_RT_PRIO))
1615e05606d3SIngo Molnar 		return 1;
1616e05606d3SIngo Molnar 	return 0;
1617e05606d3SIngo Molnar }
1618e05606d3SIngo Molnar 
1619e868171aSAlexey Dobriyan static inline int rt_task(struct task_struct *p)
1620e05606d3SIngo Molnar {
1621e05606d3SIngo Molnar 	return rt_prio(p->prio);
1622e05606d3SIngo Molnar }
1623e05606d3SIngo Molnar 
1624e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
162522c935f4SEric W. Biederman {
162622c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
162722c935f4SEric W. Biederman }
162822c935f4SEric W. Biederman 
1629e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
163022c935f4SEric W. Biederman {
163122c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
163222c935f4SEric W. Biederman }
163322c935f4SEric W. Biederman 
16346dda81f4SOleg Nesterov /*
16356dda81f4SOleg Nesterov  * Without tasklist or rcu lock it is not safe to dereference
16366dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current,
16376dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
16386dda81f4SOleg Nesterov  */
1639e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
164022c935f4SEric W. Biederman {
164122c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
164222c935f4SEric W. Biederman }
164322c935f4SEric W. Biederman 
1644e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
164522c935f4SEric W. Biederman {
164622c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
164722c935f4SEric W. Biederman }
164822c935f4SEric W. Biederman 
16497af57294SPavel Emelyanov struct pid_namespace;
16507af57294SPavel Emelyanov 
16517af57294SPavel Emelyanov /*
16527af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
16537af57294SPavel Emelyanov  * from various namespaces
16547af57294SPavel Emelyanov  *
16557af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
165644c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
165744c4e1b2SEric W. Biederman  *                     current.
16587af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
16597af57294SPavel Emelyanov  *
16607af57294SPavel Emelyanov  * set_task_vxid()   : assigns a virtual id to a task;
16617af57294SPavel Emelyanov  *
16627af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
16637af57294SPavel Emelyanov  */
166452ee2dfdSOleg Nesterov pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
166552ee2dfdSOleg Nesterov 			struct pid_namespace *ns);
16667af57294SPavel Emelyanov 
1667e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
16687af57294SPavel Emelyanov {
16697af57294SPavel Emelyanov 	return tsk->pid;
16707af57294SPavel Emelyanov }
16717af57294SPavel Emelyanov 
167252ee2dfdSOleg Nesterov static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
167352ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
167452ee2dfdSOleg Nesterov {
167552ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
167652ee2dfdSOleg Nesterov }
16777af57294SPavel Emelyanov 
16787af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
16797af57294SPavel Emelyanov {
168052ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
16817af57294SPavel Emelyanov }
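/*
 * Editorial usage sketch: for a task inside a pid namespace the global and
 * virtual ids usually differ, e.g.
 *
 *	pid_t global = task_pid_nr(tsk);	(id in the init pid namespace)
 *	pid_t local  = task_pid_vnr(tsk);	(id seen from current's namespace)
 *
 * The _vnr() value is what getpid()/gettid() would report inside that
 * namespace.
 */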
16827af57294SPavel Emelyanov 
16837af57294SPavel Emelyanov 
1684e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
16857af57294SPavel Emelyanov {
16867af57294SPavel Emelyanov 	return tsk->tgid;
16877af57294SPavel Emelyanov }
16887af57294SPavel Emelyanov 
16892f2a3a46SPavel Emelyanov pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
16907af57294SPavel Emelyanov 
16917af57294SPavel Emelyanov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
16927af57294SPavel Emelyanov {
16937af57294SPavel Emelyanov 	return pid_vnr(task_tgid(tsk));
16947af57294SPavel Emelyanov }
16957af57294SPavel Emelyanov 
16967af57294SPavel Emelyanov 
169752ee2dfdSOleg Nesterov static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
169852ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
16997af57294SPavel Emelyanov {
170052ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
17017af57294SPavel Emelyanov }
17027af57294SPavel Emelyanov 
17037af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
17047af57294SPavel Emelyanov {
170552ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
17067af57294SPavel Emelyanov }
17077af57294SPavel Emelyanov 
17087af57294SPavel Emelyanov 
170952ee2dfdSOleg Nesterov static inline pid_t task_session_nr_ns(struct task_struct *tsk,
171052ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
17117af57294SPavel Emelyanov {
171252ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
17137af57294SPavel Emelyanov }
17147af57294SPavel Emelyanov 
17157af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
17167af57294SPavel Emelyanov {
171752ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
17187af57294SPavel Emelyanov }
17197af57294SPavel Emelyanov 
17201b0f7ffdSOleg Nesterov /* obsolete, do not use */
17211b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
17221b0f7ffdSOleg Nesterov {
17231b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
17241b0f7ffdSOleg Nesterov }
17257af57294SPavel Emelyanov 
17261da177e4SLinus Torvalds /**
17271da177e4SLinus Torvalds  * pid_alive - check that a task structure is not stale
17281da177e4SLinus Torvalds  * @p: Task structure to be checked.
17291da177e4SLinus Torvalds  *
17301da177e4SLinus Torvalds  * Test if a process is not yet dead (at most zombie state)
17311da177e4SLinus Torvalds  * If pid_alive fails, then pointers within the task structure
17321da177e4SLinus Torvalds  * can be stale and must not be dereferenced.
17331da177e4SLinus Torvalds  */
1734e868171aSAlexey Dobriyan static inline int pid_alive(struct task_struct *p)
17351da177e4SLinus Torvalds {
173692476d7fSEric W. Biederman 	return p->pids[PIDTYPE_PID].pid != NULL;
17371da177e4SLinus Torvalds }
17381da177e4SLinus Torvalds 
1739f400e198SSukadev Bhattiprolu /**
1740b460cbc5SSerge E. Hallyn  * is_global_init - check if a task structure is init
17413260259fSHenne  * @tsk: Task structure to be checked.
17423260259fSHenne  *
17433260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1744f400e198SSukadev Bhattiprolu  */
1745e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1746b461cc03SPavel Emelyanov {
1747b461cc03SPavel Emelyanov 	return tsk->pid == 1;
1748b461cc03SPavel Emelyanov }
1749b460cbc5SSerge E. Hallyn 
1750b460cbc5SSerge E. Hallyn /*
1751b460cbc5SSerge E. Hallyn  * is_container_init:
1752b460cbc5SSerge E. Hallyn  * check whether the task is the init task of its own pid namespace.
1753b460cbc5SSerge E. Hallyn  */
1754b461cc03SPavel Emelyanov extern int is_container_init(struct task_struct *tsk);
1755f400e198SSukadev Bhattiprolu 
17569ec52099SCedric Le Goater extern struct pid *cad_pid;
17579ec52099SCedric Le Goater 
17581da177e4SLinus Torvalds extern void free_task(struct task_struct *tsk);
17591da177e4SLinus Torvalds #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1760e56d0903SIngo Molnar 
1761158d9ebdSAndrew Morton extern void __put_task_struct(struct task_struct *t);
1762e56d0903SIngo Molnar 
1763e56d0903SIngo Molnar static inline void put_task_struct(struct task_struct *t)
1764e56d0903SIngo Molnar {
1765e56d0903SIngo Molnar 	if (atomic_dec_and_test(&t->usage))
17668c7904a0SEric W. Biederman 		__put_task_struct(t);
1767e56d0903SIngo Molnar }
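/*
 * Editorial usage sketch of the reference counting above:
 *
 *	get_task_struct(p);
 *	...			(use p outside the lock that found it)
 *	put_task_struct(p);	(may free p if this was the last reference)
 */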
17681da177e4SLinus Torvalds 
1769e80d0a1aSFrederic Weisbecker extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1770e80d0a1aSFrederic Weisbecker extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
177149048622SBalbir Singh 
17721da177e4SLinus Torvalds /*
17731da177e4SLinus Torvalds  * Per process flags
17741da177e4SLinus Torvalds  */
17751da177e4SLinus Torvalds #define PF_EXITING	0x00000004	/* getting shut down */
1776778e9a9cSAlexey Kuznetsov #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
177794886b84SLaurent Vivier #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
177821aa9af0STejun Heo #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
17791da177e4SLinus Torvalds #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
17804db96cf0SAndi Kleen #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
17811da177e4SLinus Torvalds #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
17821da177e4SLinus Torvalds #define PF_DUMPCORE	0x00000200	/* dumped core */
17831da177e4SLinus Torvalds #define PF_SIGNALED	0x00000400	/* killed by a signal */
17841da177e4SLinus Torvalds #define PF_MEMALLOC	0x00000800	/* Allocating memory */
178572fa5997SVasiliy Kulikov #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
17861da177e4SLinus Torvalds #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
17871da177e4SLinus Torvalds #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
17881da177e4SLinus Torvalds #define PF_FROZEN	0x00010000	/* frozen for system suspend */
17891da177e4SLinus Torvalds #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
17901da177e4SLinus Torvalds #define PF_KSWAPD	0x00040000	/* I am kswapd */
17911da177e4SLinus Torvalds #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1792246bb0b1SOleg Nesterov #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1793b31dc66aSJens Axboe #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1794b31dc66aSJens Axboe #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1795b31dc66aSJens Axboe #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1796b31dc66aSJens Axboe #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
17979985b0baSDavid Rientjes #define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
17984db96cf0SAndi Kleen #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1799c61afb18SPaul Jackson #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
180061a87122SThomas Gleixner #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
180158a69cb4STejun Heo #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
18021da177e4SLinus Torvalds 
18031da177e4SLinus Torvalds /*
18041da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
18051da177e4SLinus Torvalds  * tasks can access tsk->flags in read-only mode, for example
18061da177e4SLinus Torvalds  * with tsk_used_math (like during threaded core dumping).
18071da177e4SLinus Torvalds  * There is however an exception to this rule during ptrace
18081da177e4SLinus Torvalds  * or during fork: the ptracer task is allowed to write to the
18091da177e4SLinus Torvalds  * child->flags of its traced child (same goes for fork, the parent
18101da177e4SLinus Torvalds  * can write to the child->flags), because we're guaranteed the
18111da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
18121da177e4SLinus Torvalds  * at the same time the parent does it.
18131da177e4SLinus Torvalds  */
18141da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
18151da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
18161da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
18171da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
18181da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
18191da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
18201da177e4SLinus Torvalds #define conditional_used_math(condition) \
18211da177e4SLinus Torvalds 	conditional_stopped_child_used_math(condition, current)
18221da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
18231da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
18241da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
18251da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
18261da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
18271da177e4SLinus Torvalds 
1828e5c1902eSTejun Heo /*
1829a8f072c1STejun Heo  * task->jobctl flags
1830e5c1902eSTejun Heo  */
1831a8f072c1STejun Heo #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1832e5c1902eSTejun Heo 
1833a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1834a8f072c1STejun Heo #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1835a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
183673ddff2bSTejun Heo #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1837fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1838a8f072c1STejun Heo #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1839544b2c91STejun Heo #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1840a8f072c1STejun Heo 
1841a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1842a8f072c1STejun Heo #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1843a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
184473ddff2bSTejun Heo #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1845fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1846a8f072c1STejun Heo #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1847544b2c91STejun Heo #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1848a8f072c1STejun Heo 
1849fb1d910cSTejun Heo #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
185073ddff2bSTejun Heo #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
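/*
 * Editorial example: the low 16 bits (JOBCTL_STOP_SIGMASK) carry the number
 * of the signal that initiated the last group stop, e.g.
 *
 *	int signr = task->jobctl & JOBCTL_STOP_SIGMASK;
 */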
18513759a0d9STejun Heo 
18527dd3db54STejun Heo extern bool task_set_jobctl_pending(struct task_struct *task,
18537dd3db54STejun Heo 				    unsigned int mask);
185473ddff2bSTejun Heo extern void task_clear_jobctl_trapping(struct task_struct *task);
18553759a0d9STejun Heo extern void task_clear_jobctl_pending(struct task_struct *task,
18563759a0d9STejun Heo 				      unsigned int mask);
185739efa3efSTejun Heo 
1858a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1859f41d911fSPaul E. McKenney 
1860f41d911fSPaul E. McKenney #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
18611aa03f11SPaul E. McKenney #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1862f41d911fSPaul E. McKenney 
1863f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1864f41d911fSPaul E. McKenney {
1865f41d911fSPaul E. McKenney 	p->rcu_read_lock_nesting = 0;
1866f41d911fSPaul E. McKenney 	p->rcu_read_unlock_special = 0;
1867a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1868dd5d19baSPaul E. McKenney 	p->rcu_blocked_node = NULL;
186924278d14SPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
187024278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
187124278d14SPaul E. McKenney 	p->rcu_boost_mutex = NULL;
187224278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1873f41d911fSPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_node_entry);
1874f41d911fSPaul E. McKenney }
1875f41d911fSPaul E. McKenney 
1876f41d911fSPaul E. McKenney #else
1877f41d911fSPaul E. McKenney 
1878f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1879f41d911fSPaul E. McKenney {
1880f41d911fSPaul E. McKenney }
1881f41d911fSPaul E. McKenney 
1882f41d911fSPaul E. McKenney #endif
1883f41d911fSPaul E. McKenney 
188404e7e951SFrederic Weisbecker static inline void rcu_switch(struct task_struct *prev,
188504e7e951SFrederic Weisbecker 			      struct task_struct *next)
188604e7e951SFrederic Weisbecker {
188704e7e951SFrederic Weisbecker #ifdef CONFIG_RCU_USER_QS
188804e7e951SFrederic Weisbecker 	rcu_user_hooks_switch(prev, next);
188904e7e951SFrederic Weisbecker #endif
189004e7e951SFrederic Weisbecker }
189104e7e951SFrederic Weisbecker 
1892907aed48SMel Gorman static inline void tsk_restore_flags(struct task_struct *task,
1893907aed48SMel Gorman 				unsigned long orig_flags, unsigned long flags)
1894907aed48SMel Gorman {
1895907aed48SMel Gorman 	task->flags &= ~flags;
1896907aed48SMel Gorman 	task->flags |= orig_flags & flags;
1897907aed48SMel Gorman }
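/*
 * Editorial usage sketch: tsk_restore_flags() saves/restores selected flags
 * around a critical region, e.g. for PF_MEMALLOC:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	...			(allocations that must not recurse into reclaim)
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */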
1898907aed48SMel Gorman 
18991da177e4SLinus Torvalds #ifdef CONFIG_SMP
19001e1b6c51SKOSAKI Motohiro extern void do_set_cpus_allowed(struct task_struct *p,
19011e1b6c51SKOSAKI Motohiro 			       const struct cpumask *new_mask);
19021e1b6c51SKOSAKI Motohiro 
1903cd8ba7cdSMike Travis extern int set_cpus_allowed_ptr(struct task_struct *p,
190496f874e2SRusty Russell 				const struct cpumask *new_mask);
19051da177e4SLinus Torvalds #else
19061e1b6c51SKOSAKI Motohiro static inline void do_set_cpus_allowed(struct task_struct *p,
19071e1b6c51SKOSAKI Motohiro 				      const struct cpumask *new_mask)
19081e1b6c51SKOSAKI Motohiro {
19091e1b6c51SKOSAKI Motohiro }
1910cd8ba7cdSMike Travis static inline int set_cpus_allowed_ptr(struct task_struct *p,
191196f874e2SRusty Russell 				       const struct cpumask *new_mask)
19121da177e4SLinus Torvalds {
191396f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
19141da177e4SLinus Torvalds 		return -EINVAL;
19151da177e4SLinus Torvalds 	return 0;
19161da177e4SLinus Torvalds }
19171da177e4SLinus Torvalds #endif
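/*
 * Editorial usage sketch: pinning a task to a single CPU with the helper
 * above, e.g.
 *
 *	ret = set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *
 * where cpumask_of() yields a constant mask containing just that CPU.
 */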
1918e0ad9556SRusty Russell 
19195167e8d5SPeter Zijlstra #ifdef CONFIG_NO_HZ
19205167e8d5SPeter Zijlstra void calc_load_enter_idle(void);
19215167e8d5SPeter Zijlstra void calc_load_exit_idle(void);
19225167e8d5SPeter Zijlstra #else
19235167e8d5SPeter Zijlstra static inline void calc_load_enter_idle(void) { }
19245167e8d5SPeter Zijlstra static inline void calc_load_exit_idle(void) { }
19255167e8d5SPeter Zijlstra #endif /* CONFIG_NO_HZ */
19265167e8d5SPeter Zijlstra 
1927e0ad9556SRusty Russell #ifndef CONFIG_CPUMASK_OFFSTACK
1928cd8ba7cdSMike Travis static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1929cd8ba7cdSMike Travis {
1930cd8ba7cdSMike Travis 	return set_cpus_allowed_ptr(p, &new_mask);
1931cd8ba7cdSMike Travis }
1932e0ad9556SRusty Russell #endif
19331da177e4SLinus Torvalds 
1934b342501cSIngo Molnar /*
1935c676329aSPeter Zijlstra  * Do not use outside of architecture code which knows its limitations.
1936c676329aSPeter Zijlstra  *
1937c676329aSPeter Zijlstra  * sched_clock() has no promise of monotonicity or bounded drift between
1938c676329aSPeter Zijlstra  * CPUs; using it directly (which you should not) requires disabling IRQs.
1939c676329aSPeter Zijlstra  *
1940c676329aSPeter Zijlstra  * Please use one of the three interfaces below.
1941b342501cSIngo Molnar  */
19421bbfa6f2SMike Frysinger extern unsigned long long notrace sched_clock(void);
1943c676329aSPeter Zijlstra /*
1944489a71b0SHiroshi Shimamoto  * See the comment in kernel/sched/clock.c
1945c676329aSPeter Zijlstra  */
1946c676329aSPeter Zijlstra extern u64 cpu_clock(int cpu);
1947c676329aSPeter Zijlstra extern u64 local_clock(void);
1948c676329aSPeter Zijlstra extern u64 sched_clock_cpu(int cpu);
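/*
 * Editorial usage sketch: measuring a short duration with local_clock(),
 * which returns nanoseconds valid on the local CPU:
 *
 *	u64 t0 = local_clock();
 *	...
 *	u64 delta_ns = local_clock() - t0;
 */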
1949c676329aSPeter Zijlstra 
1950e436d800SIngo Molnar 
1951c1955a3dSPeter Zijlstra extern void sched_clock_init(void);
1952c1955a3dSPeter Zijlstra 
19533e51f33fSPeter Zijlstra #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
19543e51f33fSPeter Zijlstra static inline void sched_clock_tick(void)
19553e51f33fSPeter Zijlstra {
19563e51f33fSPeter Zijlstra }
19573e51f33fSPeter Zijlstra 
19583e51f33fSPeter Zijlstra static inline void sched_clock_idle_sleep_event(void)
19593e51f33fSPeter Zijlstra {
19603e51f33fSPeter Zijlstra }
19613e51f33fSPeter Zijlstra 
19623e51f33fSPeter Zijlstra static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
19633e51f33fSPeter Zijlstra {
19643e51f33fSPeter Zijlstra }
19653e51f33fSPeter Zijlstra #else
1966c676329aSPeter Zijlstra /*
1967c676329aSPeter Zijlstra  * Architectures that select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their
1968c676329aSPeter Zijlstra  * arch Kconfig can set this to 1 if, during bootup, it turns out that
1969c676329aSPeter Zijlstra  * sched_clock() is reliable after all:
1971c676329aSPeter Zijlstra  */
1972c676329aSPeter Zijlstra extern int sched_clock_stable;
1973c676329aSPeter Zijlstra 
19743e51f33fSPeter Zijlstra extern void sched_clock_tick(void);
19753e51f33fSPeter Zijlstra extern void sched_clock_idle_sleep_event(void);
19763e51f33fSPeter Zijlstra extern void sched_clock_idle_wakeup_event(u64 delta_ns);
19773e51f33fSPeter Zijlstra #endif
19783e51f33fSPeter Zijlstra 
1979b52bfee4SVenkatesh Pallipadi #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1980b52bfee4SVenkatesh Pallipadi /*
1981b52bfee4SVenkatesh Pallipadi  * An interface for runtime opt-in to irq time accounting based on sched_clock().
1982b52bfee4SVenkatesh Pallipadi  * The reason for this explicit opt-in is to avoid a performance penalty with
1983b52bfee4SVenkatesh Pallipadi  * slow sched_clocks.
1984b52bfee4SVenkatesh Pallipadi  */
1985b52bfee4SVenkatesh Pallipadi extern void enable_sched_clock_irqtime(void);
1986b52bfee4SVenkatesh Pallipadi extern void disable_sched_clock_irqtime(void);
1987b52bfee4SVenkatesh Pallipadi #else
1988b52bfee4SVenkatesh Pallipadi static inline void enable_sched_clock_irqtime(void) {}
1989b52bfee4SVenkatesh Pallipadi static inline void disable_sched_clock_irqtime(void) {}
1990b52bfee4SVenkatesh Pallipadi #endif
1991b52bfee4SVenkatesh Pallipadi 
199236c8b586SIngo Molnar extern unsigned long long
199341b86e9cSIngo Molnar task_sched_runtime(struct task_struct *task);
19941da177e4SLinus Torvalds 
19951da177e4SLinus Torvalds /* sched_exec is called by processes performing an exec */
19961da177e4SLinus Torvalds #ifdef CONFIG_SMP
19971da177e4SLinus Torvalds extern void sched_exec(void);
19981da177e4SLinus Torvalds #else
19991da177e4SLinus Torvalds #define sched_exec()   {}
20001da177e4SLinus Torvalds #endif
20011da177e4SLinus Torvalds 
20022aa44d05SIngo Molnar extern void sched_clock_idle_sleep_event(void);
20032aa44d05SIngo Molnar extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2004bb29ab26SIngo Molnar 
20051da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
20061da177e4SLinus Torvalds extern void idle_task_exit(void);
20071da177e4SLinus Torvalds #else
20081da177e4SLinus Torvalds static inline void idle_task_exit(void) {}
20091da177e4SLinus Torvalds #endif
20101da177e4SLinus Torvalds 
201106d8308cSThomas Gleixner #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
201206d8308cSThomas Gleixner extern void wake_up_idle_cpu(int cpu);
201306d8308cSThomas Gleixner #else
201406d8308cSThomas Gleixner static inline void wake_up_idle_cpu(int cpu) { }
201506d8308cSThomas Gleixner #endif
201606d8308cSThomas Gleixner 
201721805085SPeter Zijlstra extern unsigned int sysctl_sched_latency;
2018b2be5e96SPeter Zijlstra extern unsigned int sysctl_sched_min_granularity;
2019bf0f6f24SIngo Molnar extern unsigned int sysctl_sched_wakeup_granularity;
2020bf0f6f24SIngo Molnar extern unsigned int sysctl_sched_child_runs_first;
20211983a922SChristian Ehrhardt 
20221983a922SChristian Ehrhardt enum sched_tunable_scaling {
20231983a922SChristian Ehrhardt 	SCHED_TUNABLESCALING_NONE,
20241983a922SChristian Ehrhardt 	SCHED_TUNABLESCALING_LOG,
20251983a922SChristian Ehrhardt 	SCHED_TUNABLESCALING_LINEAR,
20261983a922SChristian Ehrhardt 	SCHED_TUNABLESCALING_END,
20271983a922SChristian Ehrhardt };
20281983a922SChristian Ehrhardt extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
20291983a922SChristian Ehrhardt 
20302bba22c5SMike Galbraith #ifdef CONFIG_SCHED_DEBUG
2031da84d961SIngo Molnar extern unsigned int sysctl_sched_migration_cost;
2032b82d9fddSPeter Zijlstra extern unsigned int sysctl_sched_nr_migrate;
2033e9e9250bSPeter Zijlstra extern unsigned int sysctl_sched_time_avg;
2034cd1bb94bSArun R Bharadwaj extern unsigned int sysctl_timer_migration;
2035a7a4f8a7SPaul Turner extern unsigned int sysctl_sched_shares_window;
2036b2be5e96SPeter Zijlstra 
20371983a922SChristian Ehrhardt int sched_proc_update_handler(struct ctl_table *table, int write,
20388d65af78SAlexey Dobriyan 		void __user *buffer, size_t *length,
2039b2be5e96SPeter Zijlstra 		loff_t *ppos);
20402bd8e6d4SIngo Molnar #endif
2041eea08f32SArun R Bharadwaj #ifdef CONFIG_SCHED_DEBUG
2042eea08f32SArun R Bharadwaj static inline unsigned int get_sysctl_timer_migration(void)
2043eea08f32SArun R Bharadwaj {
2044eea08f32SArun R Bharadwaj 	return sysctl_timer_migration;
2045eea08f32SArun R Bharadwaj }
2046eea08f32SArun R Bharadwaj #else
2047eea08f32SArun R Bharadwaj static inline unsigned int get_sysctl_timer_migration(void)
2048eea08f32SArun R Bharadwaj {
2049eea08f32SArun R Bharadwaj 	return 1;
2050eea08f32SArun R Bharadwaj }
2051eea08f32SArun R Bharadwaj #endif
20529f0c1e56SPeter Zijlstra extern unsigned int sysctl_sched_rt_period;
20539f0c1e56SPeter Zijlstra extern int sysctl_sched_rt_runtime;
20542bd8e6d4SIngo Molnar 
2055d0b27fa7SPeter Zijlstra int sched_rt_handler(struct ctl_table *table, int write,
20568d65af78SAlexey Dobriyan 		void __user *buffer, size_t *lenp,
2057d0b27fa7SPeter Zijlstra 		loff_t *ppos);
2058d0b27fa7SPeter Zijlstra 
20595091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
20605091faa4SMike Galbraith extern unsigned int sysctl_sched_autogroup_enabled;
20615091faa4SMike Galbraith 
20625091faa4SMike Galbraith extern void sched_autogroup_create_attach(struct task_struct *p);
20635091faa4SMike Galbraith extern void sched_autogroup_detach(struct task_struct *p);
20645091faa4SMike Galbraith extern void sched_autogroup_fork(struct signal_struct *sig);
20655091faa4SMike Galbraith extern void sched_autogroup_exit(struct signal_struct *sig);
20665091faa4SMike Galbraith #ifdef CONFIG_PROC_FS
20675091faa4SMike Galbraith extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
20682e5b5b3aSHiroshi Shimamoto extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
20695091faa4SMike Galbraith #endif
20705091faa4SMike Galbraith #else
20715091faa4SMike Galbraith static inline void sched_autogroup_create_attach(struct task_struct *p) { }
20725091faa4SMike Galbraith static inline void sched_autogroup_detach(struct task_struct *p) { }
20735091faa4SMike Galbraith static inline void sched_autogroup_fork(struct signal_struct *sig) { }
20745091faa4SMike Galbraith static inline void sched_autogroup_exit(struct signal_struct *sig) { }
20755091faa4SMike Galbraith #endif
20765091faa4SMike Galbraith 
2077ec12cb7fSPaul Turner #ifdef CONFIG_CFS_BANDWIDTH
2078ec12cb7fSPaul Turner extern unsigned int sysctl_sched_cfs_bandwidth_slice;
2079ec12cb7fSPaul Turner #endif
2080ec12cb7fSPaul Turner 
2081b29739f9SIngo Molnar #ifdef CONFIG_RT_MUTEXES
208236c8b586SIngo Molnar extern int rt_mutex_getprio(struct task_struct *p);
208336c8b586SIngo Molnar extern void rt_mutex_setprio(struct task_struct *p, int prio);
208436c8b586SIngo Molnar extern void rt_mutex_adjust_pi(struct task_struct *p);
20853c7d5184SThomas Gleixner static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
20863c7d5184SThomas Gleixner {
20873c7d5184SThomas Gleixner 	return tsk->pi_blocked_on != NULL;
20883c7d5184SThomas Gleixner }
2089b29739f9SIngo Molnar #else
2090e868171aSAlexey Dobriyan static inline int rt_mutex_getprio(struct task_struct *p)
2091b29739f9SIngo Molnar {
2092b29739f9SIngo Molnar 	return p->normal_prio;
2093b29739f9SIngo Molnar }
209495e02ca9SThomas Gleixner # define rt_mutex_adjust_pi(p)		do { } while (0)
20953c7d5184SThomas Gleixner static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
20963c7d5184SThomas Gleixner {
20973c7d5184SThomas Gleixner 	return false;
20983c7d5184SThomas Gleixner }
2099b29739f9SIngo Molnar #endif
2100b29739f9SIngo Molnar 
2101d95f4122SMike Galbraith extern bool yield_to(struct task_struct *p, bool preempt);
210236c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
210336c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
210436c8b586SIngo Molnar extern int task_nice(const struct task_struct *p);
210536c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
210636c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
21071da177e4SLinus Torvalds extern int idle_cpu(int cpu);
2108fe7de49fSKOSAKI Motohiro extern int sched_setscheduler(struct task_struct *, int,
2109fe7de49fSKOSAKI Motohiro 			      const struct sched_param *);
2110961ccdddSRusty Russell extern int sched_setscheduler_nocheck(struct task_struct *, int,
2111fe7de49fSKOSAKI Motohiro 				      const struct sched_param *);
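/*
 * Illustrative sketch: switching a valid, referenced task 'p' to a
 * real-time policy; the priority value is arbitrary.
 *
 *	struct sched_param param = { .sched_priority = 1 };
 *
 *	if (sched_setscheduler(p, SCHED_FIFO, &param))
 *		pr_warn("failed to switch to SCHED_FIFO\n");
 */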
211236c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
2113c4f30608SPaul E. McKenney /**
2114c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
2115fa757281SRandy Dunlap  * @p: the task in question.
2116c4f30608SPaul E. McKenney  */
21177061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
2118c4f30608SPaul E. McKenney {
2119c4f30608SPaul E. McKenney 	return p->pid == 0;
2120c4f30608SPaul E. McKenney }
212136c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
212236c8b586SIngo Molnar extern void set_curr_task(int cpu, struct task_struct *p);
21231da177e4SLinus Torvalds 
21241da177e4SLinus Torvalds void yield(void);
21251da177e4SLinus Torvalds 
21261da177e4SLinus Torvalds /*
21271da177e4SLinus Torvalds  * The default (Linux) execution domain.
21281da177e4SLinus Torvalds  */
21291da177e4SLinus Torvalds extern struct exec_domain	default_exec_domain;
21301da177e4SLinus Torvalds 
21311da177e4SLinus Torvalds union thread_union {
21321da177e4SLinus Torvalds 	struct thread_info thread_info;
21331da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
21341da177e4SLinus Torvalds };
21351da177e4SLinus Torvalds 
21361da177e4SLinus Torvalds #ifndef __HAVE_ARCH_KSTACK_END
21371da177e4SLinus Torvalds static inline int kstack_end(void *addr)
21381da177e4SLinus Torvalds {
21391da177e4SLinus Torvalds 	/* Reliable end of stack detection:
21401da177e4SLinus Torvalds 	 * Some APM bios versions misalign the stack
21411da177e4SLinus Torvalds 	 * Some APM BIOS versions misalign the stack
21421da177e4SLinus Torvalds 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
21431da177e4SLinus Torvalds }
21441da177e4SLinus Torvalds #endif
21451da177e4SLinus Torvalds 
21461da177e4SLinus Torvalds extern union thread_union init_thread_union;
21471da177e4SLinus Torvalds extern struct task_struct init_task;
21481da177e4SLinus Torvalds 
21491da177e4SLinus Torvalds extern struct   mm_struct init_mm;
21501da177e4SLinus Torvalds 
2151198fe21bSPavel Emelyanov extern struct pid_namespace init_pid_ns;
2152198fe21bSPavel Emelyanov 
2153198fe21bSPavel Emelyanov /*
2154198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
2155198fe21bSPavel Emelyanov  *
2156198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
2157198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
2158228ebcbeSPavel Emelyanov  * find_task_by_vpid():
2159228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
2160198fe21bSPavel Emelyanov  *
2161e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
2162198fe21bSPavel Emelyanov  */
2163198fe21bSPavel Emelyanov 
2164228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
2165228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2166228ebcbeSPavel Emelyanov 		struct pid_namespace *ns);
2167198fe21bSPavel Emelyanov 
21688520d7c7SOleg Nesterov extern void __set_special_pids(struct pid *pid);
21691da177e4SLinus Torvalds 
21701da177e4SLinus Torvalds /* per-UID process charging. */
21717b44ab97SEric W. Biederman extern struct user_struct * alloc_uid(kuid_t);
21721da177e4SLinus Torvalds static inline struct user_struct *get_uid(struct user_struct *u)
21731da177e4SLinus Torvalds {
21741da177e4SLinus Torvalds 	atomic_inc(&u->__count);
21751da177e4SLinus Torvalds 	return u;
21761da177e4SLinus Torvalds }
21771da177e4SLinus Torvalds extern void free_uid(struct user_struct *);
21781da177e4SLinus Torvalds 
21791da177e4SLinus Torvalds #include <asm/current.h>
21801da177e4SLinus Torvalds 
2181f0af911aSTorben Hohn extern void xtime_update(unsigned long ticks);
21821da177e4SLinus Torvalds 
2183b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2184b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
21853e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
21861da177e4SLinus Torvalds #ifdef CONFIG_SMP
21871da177e4SLinus Torvalds  extern void kick_process(struct task_struct *tsk);
21881da177e4SLinus Torvalds #else
21891da177e4SLinus Torvalds  static inline void kick_process(struct task_struct *tsk) { }
21901da177e4SLinus Torvalds #endif
21913e51e3edSSamir Bellabes extern void sched_fork(struct task_struct *p);
2192ad46c2c4SIngo Molnar extern void sched_dead(struct task_struct *p);
21931da177e4SLinus Torvalds 
21941da177e4SLinus Torvalds extern void proc_caches_init(void);
21951da177e4SLinus Torvalds extern void flush_signals(struct task_struct *);
21963bcac026SDavid Howells extern void __flush_signals(struct task_struct *);
219710ab825bSOleg Nesterov extern void ignore_signals(struct task_struct *);
21981da177e4SLinus Torvalds extern void flush_signal_handlers(struct task_struct *, int force_default);
21991da177e4SLinus Torvalds extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
22001da177e4SLinus Torvalds 
22011da177e4SLinus Torvalds static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
22021da177e4SLinus Torvalds {
22031da177e4SLinus Torvalds 	unsigned long flags;
22041da177e4SLinus Torvalds 	int ret;
22051da177e4SLinus Torvalds 
22061da177e4SLinus Torvalds 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
22071da177e4SLinus Torvalds 	ret = dequeue_signal(tsk, mask, info);
22081da177e4SLinus Torvalds 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
22091da177e4SLinus Torvalds 
22101da177e4SLinus Torvalds 	return ret;
22111da177e4SLinus Torvalds }
22121da177e4SLinus Torvalds 
22131da177e4SLinus Torvalds extern void block_all_signals(int (*notifier)(void *priv), void *priv,
22141da177e4SLinus Torvalds 			      sigset_t *mask);
22151da177e4SLinus Torvalds extern void unblock_all_signals(void);
22161da177e4SLinus Torvalds extern void release_task(struct task_struct * p);
22171da177e4SLinus Torvalds extern int send_sig_info(int, struct siginfo *, struct task_struct *);
22181da177e4SLinus Torvalds extern int force_sigsegv(int, struct task_struct *);
22191da177e4SLinus Torvalds extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2220c4b92fc1SEric W. Biederman extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2221c4b92fc1SEric W. Biederman extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2222d178bc3aSSerge Hallyn extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2223d178bc3aSSerge Hallyn 				const struct cred *, u32);
2224c4b92fc1SEric W. Biederman extern int kill_pgrp(struct pid *pid, int sig, int priv);
2225c4b92fc1SEric W. Biederman extern int kill_pid(struct pid *pid, int sig, int priv);
2226c3de4b38SMatthew Wilcox extern int kill_proc_info(int, struct siginfo *, pid_t);
222786773473SOleg Nesterov extern __must_check bool do_notify_parent(struct task_struct *, int);
2228a7f0765eSOleg Nesterov extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
22291da177e4SLinus Torvalds extern void force_sig(int, struct task_struct *);
22301da177e4SLinus Torvalds extern int send_sig(int, struct task_struct *, int);
223109faef11SOleg Nesterov extern int zap_other_threads(struct task_struct *p);
22321da177e4SLinus Torvalds extern struct sigqueue *sigqueue_alloc(void);
22331da177e4SLinus Torvalds extern void sigqueue_free(struct sigqueue *);
2234ac5c2153SOleg Nesterov extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
22359ac95f2fSOleg Nesterov extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
22361da177e4SLinus Torvalds extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
22371da177e4SLinus Torvalds 
223851a7b448SAl Viro static inline void restore_saved_sigmask(void)
223951a7b448SAl Viro {
224051a7b448SAl Viro 	if (test_and_clear_restore_sigmask())
224177097ae5SAl Viro 		__set_current_blocked(&current->saved_sigmask);
224251a7b448SAl Viro }
224351a7b448SAl Viro 
2244b7f9a11aSAl Viro static inline sigset_t *sigmask_to_save(void)
2245b7f9a11aSAl Viro {
2246b7f9a11aSAl Viro 	sigset_t *res = &current->blocked;
2247b7f9a11aSAl Viro 	if (unlikely(test_restore_sigmask()))
2248b7f9a11aSAl Viro 		res = &current->saved_sigmask;
2249b7f9a11aSAl Viro 	return res;
2250b7f9a11aSAl Viro }
2251b7f9a11aSAl Viro 
22529ec52099SCedric Le Goater static inline int kill_cad_pid(int sig, int priv)
22539ec52099SCedric Le Goater {
22549ec52099SCedric Le Goater 	return kill_pid(cad_pid, sig, priv);
22559ec52099SCedric Le Goater }
22569ec52099SCedric Le Goater 
22571da177e4SLinus Torvalds /* These can be the second arg to send_sig_info/send_group_sig_info.  */
22581da177e4SLinus Torvalds #define SEND_SIG_NOINFO ((struct siginfo *) 0)
22591da177e4SLinus Torvalds #define SEND_SIG_PRIV	((struct siginfo *) 1)
22601da177e4SLinus Torvalds #define SEND_SIG_FORCED	((struct siginfo *) 2)
22611da177e4SLinus Torvalds 
22622a855dd0SSebastian Andrzej Siewior /*
22632a855dd0SSebastian Andrzej Siewior  * True if we are on the alternate signal stack.
22642a855dd0SSebastian Andrzej Siewior  */
22651da177e4SLinus Torvalds static inline int on_sig_stack(unsigned long sp)
22661da177e4SLinus Torvalds {
22672a855dd0SSebastian Andrzej Siewior #ifdef CONFIG_STACK_GROWSUP
22682a855dd0SSebastian Andrzej Siewior 	return sp >= current->sas_ss_sp &&
22692a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp < current->sas_ss_size;
22702a855dd0SSebastian Andrzej Siewior #else
22712a855dd0SSebastian Andrzej Siewior 	return sp > current->sas_ss_sp &&
22722a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp <= current->sas_ss_size;
22732a855dd0SSebastian Andrzej Siewior #endif
22741da177e4SLinus Torvalds }
22751da177e4SLinus Torvalds 
22761da177e4SLinus Torvalds static inline int sas_ss_flags(unsigned long sp)
22771da177e4SLinus Torvalds {
22781da177e4SLinus Torvalds 	return (current->sas_ss_size == 0 ? SS_DISABLE
22791da177e4SLinus Torvalds 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
22801da177e4SLinus Torvalds }
22811da177e4SLinus Torvalds 
22821da177e4SLinus Torvalds /*
22831da177e4SLinus Torvalds  * Routines for handling mm_structs
22841da177e4SLinus Torvalds  */
22851da177e4SLinus Torvalds extern struct mm_struct * mm_alloc(void);
22861da177e4SLinus Torvalds 
22871da177e4SLinus Torvalds /* mmdrop drops the mm and the page tables */
2288b3c97528SHarvey Harrison extern void __mmdrop(struct mm_struct *);
22891da177e4SLinus Torvalds static inline void mmdrop(struct mm_struct * mm)
22901da177e4SLinus Torvalds {
22916fb43d7bSIngo Molnar 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
22921da177e4SLinus Torvalds 		__mmdrop(mm);
22931da177e4SLinus Torvalds }
22941da177e4SLinus Torvalds 
22951da177e4SLinus Torvalds /* mmput gets rid of the mappings and all user-space */
22961da177e4SLinus Torvalds extern void mmput(struct mm_struct *);
22971da177e4SLinus Torvalds /* Grab a reference to a task's mm, if it is not already going away */
22981da177e4SLinus Torvalds extern struct mm_struct *get_task_mm(struct task_struct *task);
22998cdb878dSChristopher Yeoh /*
23008cdb878dSChristopher Yeoh  * Grab a reference to a task's mm, if it is not already going away
23018cdb878dSChristopher Yeoh  * and if ptrace_may_access() with the given mode parameter
23028cdb878dSChristopher Yeoh  * succeeds.
23038cdb878dSChristopher Yeoh  */
23048cdb878dSChristopher Yeoh extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
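/*
 * Illustrative sketch: the usual pattern for borrowing another task's mm;
 * 'task' is assumed to be a valid task_struct pointer.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		... operate on mm ...
 *		mmput(mm);
 *	}
 */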
23051da177e4SLinus Torvalds /* Remove the current task's stale references to the old mm_struct */
23061da177e4SLinus Torvalds extern void mm_release(struct task_struct *, struct mm_struct *);
2307402b0862SCarsten Otte /* Allocate a new mm structure and copy contents from tsk->mm */
2308402b0862SCarsten Otte extern struct mm_struct *dup_mm(struct task_struct *tsk);
23091da177e4SLinus Torvalds 
23106f2c55b8SAlexey Dobriyan extern int copy_thread(unsigned long, unsigned long, unsigned long,
23116f2c55b8SAlexey Dobriyan 			struct task_struct *, struct pt_regs *);
23121da177e4SLinus Torvalds extern void flush_thread(void);
23131da177e4SLinus Torvalds extern void exit_thread(void);
23141da177e4SLinus Torvalds 
23151da177e4SLinus Torvalds extern void exit_files(struct task_struct *);
2316a7e5328aSOleg Nesterov extern void __cleanup_sighand(struct sighand_struct *);
2317cbaffba1SOleg Nesterov 
23181da177e4SLinus Torvalds extern void exit_itimers(struct signal_struct *);
2319cbaffba1SOleg Nesterov extern void flush_itimer_signals(void);
23201da177e4SLinus Torvalds 
23219402c95fSJoe Perches extern void do_group_exit(int);
23221da177e4SLinus Torvalds 
23231da177e4SLinus Torvalds extern void daemonize(const char *, ...);
23241da177e4SLinus Torvalds extern int allow_signal(int);
23251da177e4SLinus Torvalds extern int disallow_signal(int);
23261da177e4SLinus Torvalds 
2327d7627467SDavid Howells extern int do_execve(const char *,
2328d7627467SDavid Howells 		     const char __user * const __user *,
2329d7627467SDavid Howells 		     const char __user * const __user *, struct pt_regs *);
23301da177e4SLinus Torvalds extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
233136c8b586SIngo Molnar struct task_struct *fork_idle(int);
23322aa3a7f8SAl Viro #ifdef CONFIG_GENERIC_KERNEL_THREAD
23332aa3a7f8SAl Viro extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
23342aa3a7f8SAl Viro #endif
23351da177e4SLinus Torvalds 
23361da177e4SLinus Torvalds extern void set_task_comm(struct task_struct *tsk, char *from);
233759714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
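/*
 * Illustrative sketch: copying a task's comm into a correctly sized buffer.
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_info("%s\n", comm);
 */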
23381da177e4SLinus Torvalds 
23391da177e4SLinus Torvalds #ifdef CONFIG_SMP
2340317f3941SPeter Zijlstra void scheduler_ipi(void);
234185ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
23421da177e4SLinus Torvalds #else
2343184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
234485ba2d86SRoland McGrath static inline unsigned long wait_task_inactive(struct task_struct *p,
234585ba2d86SRoland McGrath 					       long match_state)
234685ba2d86SRoland McGrath {
234785ba2d86SRoland McGrath 	return 1;
234885ba2d86SRoland McGrath }
23491da177e4SLinus Torvalds #endif
23501da177e4SLinus Torvalds 
235105725f7eSJiri Pirko #define next_task(p) \
235205725f7eSJiri Pirko 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
23531da177e4SLinus Torvalds 
23541da177e4SLinus Torvalds #define for_each_process(p) \
23551da177e4SLinus Torvalds 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
23561da177e4SLinus Torvalds 
23575bb459bbSOleg Nesterov extern bool current_is_single_threaded(void);
2358d84f4f99SDavid Howells 
23591da177e4SLinus Torvalds /*
23601da177e4SLinus Torvalds  * Careful: do_each_thread/while_each_thread is a double loop so
23611da177e4SLinus Torvalds  *          'break' will not work as expected - use goto instead.
23621da177e4SLinus Torvalds  */
23631da177e4SLinus Torvalds #define do_each_thread(g, t) \
23641da177e4SLinus Torvalds 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
23651da177e4SLinus Torvalds 
23661da177e4SLinus Torvalds #define while_each_thread(g, t) \
23671da177e4SLinus Torvalds 	while ((t = next_thread(t)) != g)
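/*
 * Illustrative sketch: walking every thread under the tasklist lock and
 * bailing out early with goto, as the comment above requires;
 * should_stop() is a hypothetical predicate.
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (should_stop(t))
 *			goto out;
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 */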
23681da177e4SLinus Torvalds 
23697e49827cSOleg Nesterov static inline int get_nr_threads(struct task_struct *tsk)
23707e49827cSOleg Nesterov {
2371b3ac022cSOleg Nesterov 	return tsk->signal->nr_threads;
23727e49827cSOleg Nesterov }
23737e49827cSOleg Nesterov 
2374087806b1SOleg Nesterov static inline bool thread_group_leader(struct task_struct *p)
2375087806b1SOleg Nesterov {
2376087806b1SOleg Nesterov 	return p->exit_signal >= 0;
2377087806b1SOleg Nesterov }
23781da177e4SLinus Torvalds 
23790804ef4bSEric W. Biederman /* Due to the insanities of de_thread it is possible for a process
23800804ef4bSEric W. Biederman  * to have the pid of the thread group leader without actually being
23810804ef4bSEric W. Biederman  * the thread group leader.  For iteration through the pids in proc
23820804ef4bSEric W. Biederman  * all we care about is that we have a task with the appropriate
23830804ef4bSEric W. Biederman  * pid; we don't actually care if we have the right task.
23840804ef4bSEric W. Biederman  */
2385e868171aSAlexey Dobriyan static inline int has_group_leader_pid(struct task_struct *p)
23860804ef4bSEric W. Biederman {
23870804ef4bSEric W. Biederman 	return p->pid == p->tgid;
23880804ef4bSEric W. Biederman }
23890804ef4bSEric W. Biederman 
2390bac0abd6SPavel Emelyanov static inline
2391bac0abd6SPavel Emelyanov int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2392bac0abd6SPavel Emelyanov {
2393bac0abd6SPavel Emelyanov 	return p1->tgid == p2->tgid;
2394bac0abd6SPavel Emelyanov }
2395bac0abd6SPavel Emelyanov 
239636c8b586SIngo Molnar static inline struct task_struct *next_thread(const struct task_struct *p)
239747e65328SOleg Nesterov {
239805725f7eSJiri Pirko 	return list_entry_rcu(p->thread_group.next,
239936c8b586SIngo Molnar 			      struct task_struct, thread_group);
240047e65328SOleg Nesterov }
240147e65328SOleg Nesterov 
2402e868171aSAlexey Dobriyan static inline int thread_group_empty(struct task_struct *p)
24031da177e4SLinus Torvalds {
240447e65328SOleg Nesterov 	return list_empty(&p->thread_group);
24051da177e4SLinus Torvalds }
24061da177e4SLinus Torvalds 
24071da177e4SLinus Torvalds #define delay_group_leader(p) \
24081da177e4SLinus Torvalds 		(thread_group_leader(p) && !thread_group_empty(p))
24091da177e4SLinus Torvalds 
24101da177e4SLinus Torvalds /*
2411260ea101SEric W. Biederman  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
241222e2c507SJens Axboe  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2413ddbcc7e8SPaul Menage  * pins the final release of task.io_context.  Also protects ->cpuset and
2414d68b46feSOleg Nesterov  * ->cgroup.subsys[]. And ->vfork_done.
24151da177e4SLinus Torvalds  *
24161da177e4SLinus Torvalds  * Nests both inside and outside of read_lock(&tasklist_lock).
24171da177e4SLinus Torvalds  * It must not be nested with write_lock_irq(&tasklist_lock),
24181da177e4SLinus Torvalds  * neither inside nor outside.
24191da177e4SLinus Torvalds  */
24201da177e4SLinus Torvalds static inline void task_lock(struct task_struct *p)
24211da177e4SLinus Torvalds {
24221da177e4SLinus Torvalds 	spin_lock(&p->alloc_lock);
24231da177e4SLinus Torvalds }
24241da177e4SLinus Torvalds 
24251da177e4SLinus Torvalds static inline void task_unlock(struct task_struct *p)
24261da177e4SLinus Torvalds {
24271da177e4SLinus Torvalds 	spin_unlock(&p->alloc_lock);
24281da177e4SLinus Torvalds }
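/*
 * Illustrative sketch: the typical bracket around fields that task_lock()
 * protects.
 *
 *	task_lock(p);
 *	... p->fs, p->files, p->mm and p->comm are stable here ...
 *	task_unlock(p);
 */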
24291da177e4SLinus Torvalds 
2430b8ed374eSNamhyung Kim extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2431f63ee72eSOleg Nesterov 							unsigned long *flags);
2432f63ee72eSOleg Nesterov 
24339388dc30SAnton Vorontsov static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
24349388dc30SAnton Vorontsov 						       unsigned long *flags)
24359388dc30SAnton Vorontsov {
24369388dc30SAnton Vorontsov 	struct sighand_struct *ret;
24379388dc30SAnton Vorontsov 
24389388dc30SAnton Vorontsov 	ret = __lock_task_sighand(tsk, flags);
24399388dc30SAnton Vorontsov 	(void)__cond_lock(&tsk->sighand->siglock, ret);
24409388dc30SAnton Vorontsov 	return ret;
24419388dc30SAnton Vorontsov }
2442b8ed374eSNamhyung Kim 
2443f63ee72eSOleg Nesterov static inline void unlock_task_sighand(struct task_struct *tsk,
2444f63ee72eSOleg Nesterov 						unsigned long *flags)
2445f63ee72eSOleg Nesterov {
2446f63ee72eSOleg Nesterov 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2447f63ee72eSOleg Nesterov }
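/*
 * Illustrative sketch: lock_task_sighand() can return NULL when the task
 * has already released its sighand, so the result must always be checked.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		... tsk->sighand and tsk->signal are stable here ...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */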
2448f63ee72eSOleg Nesterov 
24494714d1d3SBen Blum #ifdef CONFIG_CGROUPS
2450257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk)
24514714d1d3SBen Blum {
2452257058aeSTejun Heo 	down_read(&tsk->signal->group_rwsem);
24534714d1d3SBen Blum }
2454257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk)
24554714d1d3SBen Blum {
2456257058aeSTejun Heo 	up_read(&tsk->signal->group_rwsem);
24574714d1d3SBen Blum }
245877e4ef99STejun Heo 
245977e4ef99STejun Heo /**
246077e4ef99STejun Heo  * threadgroup_lock - lock threadgroup
246177e4ef99STejun Heo  * @tsk: member task of the threadgroup to lock
246277e4ef99STejun Heo  *
246377e4ef99STejun Heo  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
246477e4ef99STejun Heo  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
246577e4ef99STejun Heo  * perform exec.  This is useful for cases where the threadgroup needs to
246677e4ef99STejun Heo  * stay stable across blockable operations.
246777e4ef99STejun Heo  *
246877e4ef99STejun Heo  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
246977e4ef99STejun Heo  * synchronization.  While held, no new task will be added to threadgroup
247077e4ef99STejun Heo  * and no existing live task will have its PF_EXITING set.
247177e4ef99STejun Heo  *
247277e4ef99STejun Heo  * During exec, a task goes and puts its thread group through unusual
247377e4ef99STejun Heo  * changes.  After de-threading, exclusive access is assumed to resources
247477e4ef99STejun Heo  * which are usually shared by tasks in the same group - e.g. sighand may
247577e4ef99STejun Heo  * be replaced with a new one.  Also, the exec'ing task takes over group
247677e4ef99STejun Heo  * leader role including its pid.  Exclude these changes while locked by
247777e4ef99STejun Heo  * grabbing cred_guard_mutex which is used to synchronize exec path.
247877e4ef99STejun Heo  */
2479257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk)
24804714d1d3SBen Blum {
248177e4ef99STejun Heo 	/*
248277e4ef99STejun Heo 	 * exec uses exit for de-threading, nesting group_rwsem inside
248377e4ef99STejun Heo 	 * cred_guard_mutex. Grab cred_guard_mutex first.
248477e4ef99STejun Heo 	 */
248577e4ef99STejun Heo 	mutex_lock(&tsk->signal->cred_guard_mutex);
2486257058aeSTejun Heo 	down_write(&tsk->signal->group_rwsem);
24874714d1d3SBen Blum }
248877e4ef99STejun Heo 
248977e4ef99STejun Heo /**
249077e4ef99STejun Heo  * threadgroup_unlock - unlock threadgroup
249177e4ef99STejun Heo  * @tsk: member task of the threadgroup to unlock
249277e4ef99STejun Heo  *
249377e4ef99STejun Heo  * Reverse threadgroup_lock().
249477e4ef99STejun Heo  */
2495257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk)
24964714d1d3SBen Blum {
2497257058aeSTejun Heo 	up_write(&tsk->signal->group_rwsem);
249877e4ef99STejun Heo 	mutex_unlock(&tsk->signal->cred_guard_mutex);
24994714d1d3SBen Blum }
25004714d1d3SBen Blum #else
2501257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2502257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk) {}
2503257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk) {}
2504257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk) {}
25054714d1d3SBen Blum #endif
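/*
 * Illustrative sketch: a cgroup-style caller bracketing a whole-threadgroup
 * operation so that no thread can enter, exit or exec underneath it.
 *
 *	threadgroup_lock(tsk);
 *	... iterate over and migrate every thread of tsk ...
 *	threadgroup_unlock(tsk);
 */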
25064714d1d3SBen Blum 
2507f037360fSAl Viro #ifndef __HAVE_THREAD_FUNCTIONS
2508f037360fSAl Viro 
2509f7e4217bSRoman Zippel #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2510f7e4217bSRoman Zippel #define task_stack_page(task)	((task)->stack)
2511a1261f54SAl Viro 
251210ebffdeSAl Viro static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
251310ebffdeSAl Viro {
251410ebffdeSAl Viro 	*task_thread_info(p) = *task_thread_info(org);
251510ebffdeSAl Viro 	task_thread_info(p)->task = p;
251610ebffdeSAl Viro }
251710ebffdeSAl Viro 
251810ebffdeSAl Viro static inline unsigned long *end_of_stack(struct task_struct *p)
251910ebffdeSAl Viro {
2520f7e4217bSRoman Zippel 	return (unsigned long *)(task_thread_info(p) + 1);
252110ebffdeSAl Viro }
252210ebffdeSAl Viro 
2523f037360fSAl Viro #endif
2524f037360fSAl Viro 
25258b05c7e6SFUJITA Tomonori static inline int object_is_on_stack(void *obj)
25268b05c7e6SFUJITA Tomonori {
25278b05c7e6SFUJITA Tomonori 	void *stack = task_stack_page(current);
25288b05c7e6SFUJITA Tomonori 
25298b05c7e6SFUJITA Tomonori 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
25308b05c7e6SFUJITA Tomonori }
25318b05c7e6SFUJITA Tomonori 
25328c9843e5SBenjamin Herrenschmidt extern void thread_info_cache_init(void);
25338c9843e5SBenjamin Herrenschmidt 
25347c9f8861SEric Sandeen #ifdef CONFIG_DEBUG_STACK_USAGE
25357c9f8861SEric Sandeen static inline unsigned long stack_not_used(struct task_struct *p)
25367c9f8861SEric Sandeen {
25377c9f8861SEric Sandeen 	unsigned long *n = end_of_stack(p);
25387c9f8861SEric Sandeen 
25397c9f8861SEric Sandeen 	do { 	/* Skip over canary */
25407c9f8861SEric Sandeen 		n++;
25417c9f8861SEric Sandeen 	} while (!*n);
25427c9f8861SEric Sandeen 
25437c9f8861SEric Sandeen 	return (unsigned long)n - (unsigned long)end_of_stack(p);
25447c9f8861SEric Sandeen }
25457c9f8861SEric Sandeen #endif
25467c9f8861SEric Sandeen 
25471da177e4SLinus Torvalds /* set thread flags in other task's structures
25481da177e4SLinus Torvalds  * - see asm/thread_info.h for TIF_xxxx flags available
25491da177e4SLinus Torvalds  */
25501da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
25511da177e4SLinus Torvalds {
2552a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
25531da177e4SLinus Torvalds }
25541da177e4SLinus Torvalds 
25551da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25561da177e4SLinus Torvalds {
2557a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
25581da177e4SLinus Torvalds }
25591da177e4SLinus Torvalds 
25601da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
25611da177e4SLinus Torvalds {
2562a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
25631da177e4SLinus Torvalds }
25641da177e4SLinus Torvalds 
25651da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25661da177e4SLinus Torvalds {
2567a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
25681da177e4SLinus Torvalds }
25691da177e4SLinus Torvalds 
25701da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
25711da177e4SLinus Torvalds {
2572a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
25731da177e4SLinus Torvalds }
25741da177e4SLinus Torvalds 
25751da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
25761da177e4SLinus Torvalds {
25771da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25781da177e4SLinus Torvalds }
25791da177e4SLinus Torvalds 
25801da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
25811da177e4SLinus Torvalds {
25821da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25831da177e4SLinus Torvalds }
25841da177e4SLinus Torvalds 
25858ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
25868ae121acSGregory Haskins {
25878ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
25888ae121acSGregory Haskins }
25898ae121acSGregory Haskins 
2590690cc3ffSEric W. Biederman static inline int restart_syscall(void)
2591690cc3ffSEric W. Biederman {
2592690cc3ffSEric W. Biederman 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2593690cc3ffSEric W. Biederman 	return -ERESTARTNOINTR;
2594690cc3ffSEric W. Biederman }
2595690cc3ffSEric W. Biederman 
25961da177e4SLinus Torvalds static inline int signal_pending(struct task_struct *p)
25971da177e4SLinus Torvalds {
25981da177e4SLinus Torvalds 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
25991da177e4SLinus Torvalds }
26001da177e4SLinus Torvalds 
2601d9588725SRoland McGrath static inline int __fatal_signal_pending(struct task_struct *p)
2602d9588725SRoland McGrath {
2603d9588725SRoland McGrath 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2604d9588725SRoland McGrath }
2605f776d12dSMatthew Wilcox 
2606f776d12dSMatthew Wilcox static inline int fatal_signal_pending(struct task_struct *p)
2607f776d12dSMatthew Wilcox {
2608f776d12dSMatthew Wilcox 	return signal_pending(p) && __fatal_signal_pending(p);
2609f776d12dSMatthew Wilcox }
2610f776d12dSMatthew Wilcox 
261116882c1eSOleg Nesterov static inline int signal_pending_state(long state, struct task_struct *p)
261216882c1eSOleg Nesterov {
261316882c1eSOleg Nesterov 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
261416882c1eSOleg Nesterov 		return 0;
261516882c1eSOleg Nesterov 	if (!signal_pending(p))
261616882c1eSOleg Nesterov 		return 0;
261716882c1eSOleg Nesterov 
261816882c1eSOleg Nesterov 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
261916882c1eSOleg Nesterov }
262016882c1eSOleg Nesterov 
26211da177e4SLinus Torvalds static inline int need_resched(void)
26221da177e4SLinus Torvalds {
26239404ef02SLinus Torvalds 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
26241da177e4SLinus Torvalds }
26251da177e4SLinus Torvalds 
26261da177e4SLinus Torvalds /*
26271da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
26281da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
26291da177e4SLinus Torvalds  * value indicates whether a reschedule was done in fact.
26301da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling,
26311da177e4SLinus Torvalds  * cond_resched_softirq() will enable bhs before scheduling.
26321da177e4SLinus Torvalds  */
2633c3921ab7SLinus Torvalds extern int _cond_resched(void);
26346f80bd98SFrederic Weisbecker 
2635613afbf8SFrederic Weisbecker #define cond_resched() ({			\
2636613afbf8SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, 0);	\
2637613afbf8SFrederic Weisbecker 	_cond_resched();			\
2638613afbf8SFrederic Weisbecker })
26396f80bd98SFrederic Weisbecker 
2640613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
2641613afbf8SFrederic Weisbecker 
2642bdd4e85dSFrederic Weisbecker #ifdef CONFIG_PREEMPT_COUNT
2643716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
264402b67cc3SHerbert Xu #else
2645716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	0
264602b67cc3SHerbert Xu #endif
2647716a4234SFrederic Weisbecker 
2648613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
2649716a4234SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2650613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
2651613afbf8SFrederic Weisbecker })
2652613afbf8SFrederic Weisbecker 
2653613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
2654613afbf8SFrederic Weisbecker 
2655613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
265675e1056fSVenkatesh Pallipadi 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2657613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
2658613afbf8SFrederic Weisbecker })
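/*
 * Illustrative sketch: breaking up a long-running loop; process_one() is a
 * hypothetical per-item helper.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_one(i);
 *		cond_resched();
 *	}
 */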
26591da177e4SLinus Torvalds 
26601da177e4SLinus Torvalds /*
26611da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
266295c354feSNick Piggin  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
266395c354feSNick Piggin  * but reflects a general need for low latency.)
26641da177e4SLinus Torvalds  */
266595c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
26661da177e4SLinus Torvalds {
266795c354feSNick Piggin #ifdef CONFIG_PREEMPT
266895c354feSNick Piggin 	return spin_is_contended(lock);
266995c354feSNick Piggin #else
26701da177e4SLinus Torvalds 	return 0;
267195c354feSNick Piggin #endif
26721da177e4SLinus Torvalds }
26731da177e4SLinus Torvalds 
26747bb44adeSRoland McGrath /*
2675f06febc9SFrank Mayhar  * Thread group CPU time accounting.
2676f06febc9SFrank Mayhar  */
26774cd4c1b4SPeter Zijlstra void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
26784da94d49SPeter Zijlstra void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2679f06febc9SFrank Mayhar 
2680f06febc9SFrank Mayhar static inline void thread_group_cputime_init(struct signal_struct *sig)
2681f06febc9SFrank Mayhar {
2682ee30a7b2SThomas Gleixner 	raw_spin_lock_init(&sig->cputimer.lock);
2683f06febc9SFrank Mayhar }
2684f06febc9SFrank Mayhar 
2685f06febc9SFrank Mayhar /*
26867bb44adeSRoland McGrath  * Reevaluate whether the task has signals pending delivery.
26877bb44adeSRoland McGrath  * Wake the task if so.
26887bb44adeSRoland McGrath  * This is required every time the blocked sigset_t changes.
26897bb44adeSRoland McGrath  * Callers must hold sighand->siglock.
26907bb44adeSRoland McGrath  */
26917bb44adeSRoland McGrath extern void recalc_sigpending_and_wake(struct task_struct *t);
26921da177e4SLinus Torvalds extern void recalc_sigpending(void);
26931da177e4SLinus Torvalds 
26941da177e4SLinus Torvalds extern void signal_wake_up(struct task_struct *t, int resume_stopped);
26951da177e4SLinus Torvalds 
26961da177e4SLinus Torvalds /*
26971da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
26981da177e4SLinus Torvalds  */
26991da177e4SLinus Torvalds #ifdef CONFIG_SMP
27001da177e4SLinus Torvalds 
27011da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
27021da177e4SLinus Torvalds {
2703a1261f54SAl Viro 	return task_thread_info(p)->cpu;
27041da177e4SLinus Torvalds }
27051da177e4SLinus Torvalds 
2706c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
27071da177e4SLinus Torvalds 
27081da177e4SLinus Torvalds #else
27091da177e4SLinus Torvalds 
27101da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
27111da177e4SLinus Torvalds {
27121da177e4SLinus Torvalds 	return 0;
27131da177e4SLinus Torvalds }
27141da177e4SLinus Torvalds 
27151da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
27161da177e4SLinus Torvalds {
27171da177e4SLinus Torvalds }
27181da177e4SLinus Torvalds 
27191da177e4SLinus Torvalds #endif /* CONFIG_SMP */
27201da177e4SLinus Torvalds 
272196f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
272296f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
27235c45bf27SSiddha, Suresh B 
27241da177e4SLinus Torvalds extern void normalize_rt_tasks(void);
27251da177e4SLinus Torvalds 
27267c941438SDhaval Giani #ifdef CONFIG_CGROUP_SCHED
27279b5b7751SSrivatsa Vaddagiri 
272807e06b01SYong Zhang extern struct task_group root_task_group;
27299b5b7751SSrivatsa Vaddagiri 
2730ec7dc8acSDhaval Giani extern struct task_group *sched_create_group(struct task_group *parent);
27314cf86d77SIngo Molnar extern void sched_destroy_group(struct task_group *tg);
27329b5b7751SSrivatsa Vaddagiri extern void sched_move_task(struct task_struct *tsk);
2733052f1dc7SPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
27344cf86d77SIngo Molnar extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
27355cb350baSDhaval Giani extern unsigned long sched_group_shares(struct task_group *tg);
2736052f1dc7SPeter Zijlstra #endif
2737052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
27389f0c1e56SPeter Zijlstra extern int sched_group_set_rt_runtime(struct task_group *tg,
27399f0c1e56SPeter Zijlstra 				      long rt_runtime_us);
27409f0c1e56SPeter Zijlstra extern long sched_group_rt_runtime(struct task_group *tg);
2741d0b27fa7SPeter Zijlstra extern int sched_group_set_rt_period(struct task_group *tg,
2742d0b27fa7SPeter Zijlstra 				      long rt_period_us);
2743d0b27fa7SPeter Zijlstra extern long sched_group_rt_period(struct task_group *tg);
274454e99124SDhaval Giani extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2745052f1dc7SPeter Zijlstra #endif
27468323f26cSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */
27479b5b7751SSrivatsa Vaddagiri 
274854e99124SDhaval Giani extern int task_can_switch_user(struct user_struct *up,
274954e99124SDhaval Giani 					struct task_struct *tsk);
275054e99124SDhaval Giani 
27514b98d11bSAlexey Dobriyan #ifdef CONFIG_TASK_XACCT
27524b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
27534b98d11bSAlexey Dobriyan {
2754940389b8SAndrea Righi 	tsk->ioac.rchar += amt;
27554b98d11bSAlexey Dobriyan }
27564b98d11bSAlexey Dobriyan 
27574b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
27584b98d11bSAlexey Dobriyan {
2759940389b8SAndrea Righi 	tsk->ioac.wchar += amt;
27604b98d11bSAlexey Dobriyan }
27614b98d11bSAlexey Dobriyan 
27624b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
27634b98d11bSAlexey Dobriyan {
2764940389b8SAndrea Righi 	tsk->ioac.syscr++;
27654b98d11bSAlexey Dobriyan }
27664b98d11bSAlexey Dobriyan 
27674b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
27684b98d11bSAlexey Dobriyan {
2769940389b8SAndrea Righi 	tsk->ioac.syscw++;
27704b98d11bSAlexey Dobriyan }
27714b98d11bSAlexey Dobriyan #else
27724b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
27734b98d11bSAlexey Dobriyan {
27744b98d11bSAlexey Dobriyan }
27754b98d11bSAlexey Dobriyan 
27764b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
27774b98d11bSAlexey Dobriyan {
27784b98d11bSAlexey Dobriyan }
27794b98d11bSAlexey Dobriyan 
27804b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
27814b98d11bSAlexey Dobriyan {
27824b98d11bSAlexey Dobriyan }
27834b98d11bSAlexey Dobriyan 
27844b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
27854b98d11bSAlexey Dobriyan {
27864b98d11bSAlexey Dobriyan }
27874b98d11bSAlexey Dobriyan #endif
27884b98d11bSAlexey Dobriyan 
278982455257SDave Hansen #ifndef TASK_SIZE_OF
279082455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
279182455257SDave Hansen #endif
279282455257SDave Hansen 
2793cf475ad2SBalbir Singh #ifdef CONFIG_MM_OWNER
2794cf475ad2SBalbir Singh extern void mm_update_next_owner(struct mm_struct *mm);
2795cf475ad2SBalbir Singh extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2796cf475ad2SBalbir Singh #else
2797cf475ad2SBalbir Singh static inline void mm_update_next_owner(struct mm_struct *mm)
2798cf475ad2SBalbir Singh {
2799cf475ad2SBalbir Singh }
2800cf475ad2SBalbir Singh 
2801cf475ad2SBalbir Singh static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2802cf475ad2SBalbir Singh {
2803cf475ad2SBalbir Singh }
2804cf475ad2SBalbir Singh #endif /* CONFIG_MM_OWNER */
2805cf475ad2SBalbir Singh 
28063e10e716SJiri Slaby static inline unsigned long task_rlimit(const struct task_struct *tsk,
28073e10e716SJiri Slaby 		unsigned int limit)
28083e10e716SJiri Slaby {
28093e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
28103e10e716SJiri Slaby }
28113e10e716SJiri Slaby 
28123e10e716SJiri Slaby static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
28133e10e716SJiri Slaby 		unsigned int limit)
28143e10e716SJiri Slaby {
28153e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
28163e10e716SJiri Slaby }
28173e10e716SJiri Slaby 
28183e10e716SJiri Slaby static inline unsigned long rlimit(unsigned int limit)
28193e10e716SJiri Slaby {
28203e10e716SJiri Slaby 	return task_rlimit(current, limit);
28213e10e716SJiri Slaby }
28223e10e716SJiri Slaby 
28233e10e716SJiri Slaby static inline unsigned long rlimit_max(unsigned int limit)
28243e10e716SJiri Slaby {
28253e10e716SJiri Slaby 	return task_rlimit_max(current, limit);
28263e10e716SJiri Slaby }
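/*
 * Illustrative sketch: checking the current task's soft address-space limit
 * before a (hypothetical) large expansion of size new_size.
 *
 *	if (new_size > rlimit(RLIMIT_AS))
 *		return -ENOMEM;
 */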
28273e10e716SJiri Slaby 
28281da177e4SLinus Torvalds #endif
2829