xref: /linux/include/linux/sched.h (revision 932ecebb0405b9a41cd18946e6cff8a17d434e23)
11da177e4SLinus Torvalds #ifndef _LINUX_SCHED_H
21da177e4SLinus Torvalds #define _LINUX_SCHED_H
31da177e4SLinus Torvalds 
4b7b3c76aSDavid Woodhouse /*
5b7b3c76aSDavid Woodhouse  * cloning flags:
6b7b3c76aSDavid Woodhouse  */
7b7b3c76aSDavid Woodhouse #define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
8b7b3c76aSDavid Woodhouse #define CLONE_VM	0x00000100	/* set if VM shared between processes */
9b7b3c76aSDavid Woodhouse #define CLONE_FS	0x00000200	/* set if fs info shared between processes */
10b7b3c76aSDavid Woodhouse #define CLONE_FILES	0x00000400	/* set if open files shared between processes */
11b7b3c76aSDavid Woodhouse #define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
12b7b3c76aSDavid Woodhouse #define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
13b7b3c76aSDavid Woodhouse #define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
14b7b3c76aSDavid Woodhouse #define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
15b7b3c76aSDavid Woodhouse #define CLONE_THREAD	0x00010000	/* Same thread group? */
16b7b3c76aSDavid Woodhouse #define CLONE_NEWNS	0x00020000	/* New namespace group? */
17b7b3c76aSDavid Woodhouse #define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
18b7b3c76aSDavid Woodhouse #define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
19b7b3c76aSDavid Woodhouse #define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
20b7b3c76aSDavid Woodhouse #define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
21b7b3c76aSDavid Woodhouse #define CLONE_DETACHED		0x00400000	/* Unused, ignored */
22b7b3c76aSDavid Woodhouse #define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
23b7b3c76aSDavid Woodhouse #define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
2443bb40c9SDave Jones /* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
2543bb40c9SDave Jones    and is now available for re-use. */
26071df104SSerge E. Hallyn #define CLONE_NEWUTS		0x04000000	/* New utsname group? */
2725b21cb2SKirill Korotaev #define CLONE_NEWIPC		0x08000000	/* New ipcs */
2877ec739dSSerge E. Hallyn #define CLONE_NEWUSER		0x10000000	/* New user namespace */
2930e49c26SPavel Emelyanov #define CLONE_NEWPID		0x20000000	/* New pid namespace */
30169e3674SEric W. Biederman #define CLONE_NEWNET		0x40000000	/* New network namespace */
31fadad878SJens Axboe #define CLONE_IO		0x80000000	/* Clone io context */
32b7b3c76aSDavid Woodhouse 
33b7b3c76aSDavid Woodhouse /*
34b7b3c76aSDavid Woodhouse  * Scheduling policies
35b7b3c76aSDavid Woodhouse  */
36b7b3c76aSDavid Woodhouse #define SCHED_NORMAL		0
37b7b3c76aSDavid Woodhouse #define SCHED_FIFO		1
38b7b3c76aSDavid Woodhouse #define SCHED_RR		2
39b7b3c76aSDavid Woodhouse #define SCHED_BATCH		3
400e6aca43SIngo Molnar /* SCHED_ISO: reserved but not implemented yet */
410e6aca43SIngo Molnar #define SCHED_IDLE		5
42ca94c442SLennart Poettering /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
43ca94c442SLennart Poettering #define SCHED_RESET_ON_FORK     0x40000000
44b7b3c76aSDavid Woodhouse 
45a3b6714eSDavid Woodhouse #ifdef __KERNEL__
46b7b3c76aSDavid Woodhouse 
47b7b3c76aSDavid Woodhouse struct sched_param {
48b7b3c76aSDavid Woodhouse 	int sched_priority;
49b7b3c76aSDavid Woodhouse };
50b7b3c76aSDavid Woodhouse 
511da177e4SLinus Torvalds #include <asm/param.h>	/* for HZ */
521da177e4SLinus Torvalds 
531da177e4SLinus Torvalds #include <linux/capability.h>
541da177e4SLinus Torvalds #include <linux/threads.h>
551da177e4SLinus Torvalds #include <linux/kernel.h>
561da177e4SLinus Torvalds #include <linux/types.h>
571da177e4SLinus Torvalds #include <linux/timex.h>
581da177e4SLinus Torvalds #include <linux/jiffies.h>
591da177e4SLinus Torvalds #include <linux/rbtree.h>
601da177e4SLinus Torvalds #include <linux/thread_info.h>
611da177e4SLinus Torvalds #include <linux/cpumask.h>
621da177e4SLinus Torvalds #include <linux/errno.h>
631da177e4SLinus Torvalds #include <linux/nodemask.h>
64c92ff1bdSMartin Schwidefsky #include <linux/mm_types.h>
651da177e4SLinus Torvalds 
661da177e4SLinus Torvalds #include <asm/page.h>
671da177e4SLinus Torvalds #include <asm/ptrace.h>
681da177e4SLinus Torvalds #include <asm/cputime.h>
691da177e4SLinus Torvalds 
701da177e4SLinus Torvalds #include <linux/smp.h>
711da177e4SLinus Torvalds #include <linux/sem.h>
721da177e4SLinus Torvalds #include <linux/signal.h>
731da177e4SLinus Torvalds #include <linux/compiler.h>
741da177e4SLinus Torvalds #include <linux/completion.h>
751da177e4SLinus Torvalds #include <linux/pid.h>
761da177e4SLinus Torvalds #include <linux/percpu.h>
771da177e4SLinus Torvalds #include <linux/topology.h>
783e26c149SPeter Zijlstra #include <linux/proportions.h>
791da177e4SLinus Torvalds #include <linux/seccomp.h>
80e56d0903SIngo Molnar #include <linux/rcupdate.h>
8105725f7eSJiri Pirko #include <linux/rculist.h>
8223f78d4aSIngo Molnar #include <linux/rtmutex.h>
831da177e4SLinus Torvalds 
84a3b6714eSDavid Woodhouse #include <linux/time.h>
85a3b6714eSDavid Woodhouse #include <linux/param.h>
86a3b6714eSDavid Woodhouse #include <linux/resource.h>
87a3b6714eSDavid Woodhouse #include <linux/timer.h>
88a3b6714eSDavid Woodhouse #include <linux/hrtimer.h>
897c3ab738SAndrew Morton #include <linux/task_io_accounting.h>
909745512cSArjan van de Ven #include <linux/latencytop.h>
919e2b2dc4SDavid Howells #include <linux/cred.h>
92fa14ff4aSPeter Zijlstra #include <linux/llist.h>
93a3b6714eSDavid Woodhouse 
94a3b6714eSDavid Woodhouse #include <asm/processor.h>
9536d57ac4SH. J. Lu 
961da177e4SLinus Torvalds struct exec_domain;
97c87e2837SIngo Molnar struct futex_pi_state;
98286100a6SAlexey Dobriyan struct robust_list_head;
99bddd87c7SAkinobu Mita struct bio_list;
1005ad4e53bSAl Viro struct fs_struct;
101cdd6c482SIngo Molnar struct perf_event_context;
10273c10101SJens Axboe struct blk_plug;
1031da177e4SLinus Torvalds 
1041da177e4SLinus Torvalds /*
1051da177e4SLinus Torvalds  * List of flags we want to share for kernel threads,
1061da177e4SLinus Torvalds  * if only because they are not used by them anyway.
1071da177e4SLinus Torvalds  */
1081da177e4SLinus Torvalds #define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
1091da177e4SLinus Torvalds 
1101da177e4SLinus Torvalds /*
1111da177e4SLinus Torvalds  * These are the constant used to fake the fixed-point load-average
1121da177e4SLinus Torvalds  * counting. Some notes:
1131da177e4SLinus Torvalds  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
1141da177e4SLinus Torvalds  *    a load-average precision of 10 bits integer + 11 bits fractional
1151da177e4SLinus Torvalds  *  - if you want to count load-averages more often, you need more
1161da177e4SLinus Torvalds  *    precision, or rounding will get you. With 2-second counting freq,
1171da177e4SLinus Torvalds  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
1181da177e4SLinus Torvalds  *    11 bit fractions.
1191da177e4SLinus Torvalds  */
1201da177e4SLinus Torvalds extern unsigned long avenrun[];		/* Load averages */
1212d02494fSThomas Gleixner extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
1221da177e4SLinus Torvalds 
1231da177e4SLinus Torvalds #define FSHIFT		11		/* nr of bits of precision */
1241da177e4SLinus Torvalds #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
1250c2043abSLinus Torvalds #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
1261da177e4SLinus Torvalds #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
1271da177e4SLinus Torvalds #define EXP_5		2014		/* 1/exp(5sec/5min) */
1281da177e4SLinus Torvalds #define EXP_15		2037		/* 1/exp(5sec/15min) */
1291da177e4SLinus Torvalds 
1301da177e4SLinus Torvalds #define CALC_LOAD(load,exp,n) \
1311da177e4SLinus Torvalds 	load *= exp; \
1321da177e4SLinus Torvalds 	load += n*(FIXED_1-exp); \
1331da177e4SLinus Torvalds 	load >>= FSHIFT;
1341da177e4SLinus Torvalds 
1351da177e4SLinus Torvalds extern unsigned long total_forks;
1361da177e4SLinus Torvalds extern int nr_threads;
1371da177e4SLinus Torvalds DECLARE_PER_CPU(unsigned long, process_counts);
1381da177e4SLinus Torvalds extern int nr_processes(void);
1391da177e4SLinus Torvalds extern unsigned long nr_running(void);
1401da177e4SLinus Torvalds extern unsigned long nr_uninterruptible(void);
1411da177e4SLinus Torvalds extern unsigned long nr_iowait(void);
1428c215bd3SPeter Zijlstra extern unsigned long nr_iowait_cpu(int cpu);
14369d25870SArjan van de Ven extern unsigned long this_cpu_load(void);
14469d25870SArjan van de Ven 
14569d25870SArjan van de Ven 
1460f004f5aSPeter Zijlstra extern void calc_global_load(unsigned long ticks);
1471da177e4SLinus Torvalds 
1487e49fcceSSteven Rostedt extern unsigned long get_parent_ip(unsigned long addr);
1497e49fcceSSteven Rostedt 
15043ae34cbSIngo Molnar struct seq_file;
15143ae34cbSIngo Molnar struct cfs_rq;
1524cf86d77SIngo Molnar struct task_group;
15343ae34cbSIngo Molnar #ifdef CONFIG_SCHED_DEBUG
15443ae34cbSIngo Molnar extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
15543ae34cbSIngo Molnar extern void proc_sched_set_task(struct task_struct *p);
15643ae34cbSIngo Molnar extern void
1575cef9ecaSIngo Molnar print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
15843ae34cbSIngo Molnar #else
15943ae34cbSIngo Molnar static inline void
16043ae34cbSIngo Molnar proc_sched_show_task(struct task_struct *p, struct seq_file *m)
16143ae34cbSIngo Molnar {
16243ae34cbSIngo Molnar }
16343ae34cbSIngo Molnar static inline void proc_sched_set_task(struct task_struct *p)
16443ae34cbSIngo Molnar {
16543ae34cbSIngo Molnar }
16643ae34cbSIngo Molnar static inline void
1675cef9ecaSIngo Molnar print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
16843ae34cbSIngo Molnar {
16943ae34cbSIngo Molnar }
17043ae34cbSIngo Molnar #endif
1711da177e4SLinus Torvalds 
1724a8342d2SLinus Torvalds /*
1734a8342d2SLinus Torvalds  * Task state bitmask. NOTE! These bits are also
1744a8342d2SLinus Torvalds  * encoded in fs/proc/array.c: get_task_state().
1754a8342d2SLinus Torvalds  *
1764a8342d2SLinus Torvalds  * We have two separate sets of flags: task->state
1774a8342d2SLinus Torvalds  * is about runnability, while task->exit_state are
1784a8342d2SLinus Torvalds  * about the task exiting. Confusing, but this way
1794a8342d2SLinus Torvalds  * modifying one set can't modify the other one by
1804a8342d2SLinus Torvalds  * mistake.
1814a8342d2SLinus Torvalds  */
1821da177e4SLinus Torvalds #define TASK_RUNNING		0
1831da177e4SLinus Torvalds #define TASK_INTERRUPTIBLE	1
1841da177e4SLinus Torvalds #define TASK_UNINTERRUPTIBLE	2
185f021a3c2SMatthew Wilcox #define __TASK_STOPPED		4
186f021a3c2SMatthew Wilcox #define __TASK_TRACED		8
1874a8342d2SLinus Torvalds /* in tsk->exit_state */
1884a8342d2SLinus Torvalds #define EXIT_ZOMBIE		16
1894a8342d2SLinus Torvalds #define EXIT_DEAD		32
1904a8342d2SLinus Torvalds /* in tsk->state again */
191af927232SMike Galbraith #define TASK_DEAD		64
192f021a3c2SMatthew Wilcox #define TASK_WAKEKILL		128
193e9c84311SPeter Zijlstra #define TASK_WAKING		256
194e1781538SPeter Zijlstra #define TASK_STATE_MAX		512
195f021a3c2SMatthew Wilcox 
19644d90df6SPeter Zijlstra #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
19773342151SPeter Zijlstra 
198e1781538SPeter Zijlstra extern char ___assert_task_state[1 - 2*!!(
199e1781538SPeter Zijlstra 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
200f021a3c2SMatthew Wilcox 
201f021a3c2SMatthew Wilcox /* Convenience macros for the sake of set_task_state */
202f021a3c2SMatthew Wilcox #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
203f021a3c2SMatthew Wilcox #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
204f021a3c2SMatthew Wilcox #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
2051da177e4SLinus Torvalds 
20692a1f4bcSMatthew Wilcox /* Convenience macros for the sake of wake_up */
20792a1f4bcSMatthew Wilcox #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
208f021a3c2SMatthew Wilcox #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
20992a1f4bcSMatthew Wilcox 
21092a1f4bcSMatthew Wilcox /* get_task_state() */
21192a1f4bcSMatthew Wilcox #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
212f021a3c2SMatthew Wilcox 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
213f021a3c2SMatthew Wilcox 				 __TASK_TRACED)
21492a1f4bcSMatthew Wilcox 
215f021a3c2SMatthew Wilcox #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
216f021a3c2SMatthew Wilcox #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
2178f92054eSDavid Howells #define task_is_dead(task)	((task)->exit_state != 0)
21892a1f4bcSMatthew Wilcox #define task_is_stopped_or_traced(task)	\
219f021a3c2SMatthew Wilcox 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
22092a1f4bcSMatthew Wilcox #define task_contributes_to_load(task)	\
221e3c8ca83SNathan Lynch 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
222376fede8STejun Heo 				 (task->flags & PF_FROZEN) == 0)
2231da177e4SLinus Torvalds 
2241da177e4SLinus Torvalds #define __set_task_state(tsk, state_value)		\
2251da177e4SLinus Torvalds 	do { (tsk)->state = (state_value); } while (0)
2261da177e4SLinus Torvalds #define set_task_state(tsk, state_value)		\
2271da177e4SLinus Torvalds 	set_mb((tsk)->state, (state_value))
2281da177e4SLinus Torvalds 
229498d0c57SAndrew Morton /*
230498d0c57SAndrew Morton  * set_current_state() includes a barrier so that the write of current->state
231498d0c57SAndrew Morton  * is correctly serialised wrt the caller's subsequent test of whether to
232498d0c57SAndrew Morton  * actually sleep:
233498d0c57SAndrew Morton  *
234498d0c57SAndrew Morton  *	set_current_state(TASK_UNINTERRUPTIBLE);
235498d0c57SAndrew Morton  *	if (do_i_need_to_sleep())
236498d0c57SAndrew Morton  *		schedule();
237498d0c57SAndrew Morton  *
238498d0c57SAndrew Morton  * If the caller does not need such serialisation then use __set_current_state()
239498d0c57SAndrew Morton  */
2401da177e4SLinus Torvalds #define __set_current_state(state_value)			\
2411da177e4SLinus Torvalds 	do { current->state = (state_value); } while (0)
2421da177e4SLinus Torvalds #define set_current_state(state_value)		\
2431da177e4SLinus Torvalds 	set_mb(current->state, (state_value))
2441da177e4SLinus Torvalds 
2451da177e4SLinus Torvalds /* Task command name length */
2461da177e4SLinus Torvalds #define TASK_COMM_LEN 16
2471da177e4SLinus Torvalds 
2481da177e4SLinus Torvalds #include <linux/spinlock.h>
2491da177e4SLinus Torvalds 
2501da177e4SLinus Torvalds /*
2511da177e4SLinus Torvalds  * This serializes "schedule()" and also protects
2521da177e4SLinus Torvalds  * the run-queue from deletions/modifications (but
2531da177e4SLinus Torvalds  * _adding_ to the beginning of the run-queue has
2541da177e4SLinus Torvalds  * a separate lock).
2551da177e4SLinus Torvalds  */
2561da177e4SLinus Torvalds extern rwlock_t tasklist_lock;
2571da177e4SLinus Torvalds extern spinlock_t mmlist_lock;
2581da177e4SLinus Torvalds 
25936c8b586SIngo Molnar struct task_struct;
2601da177e4SLinus Torvalds 
261db1466b3SPaul E. McKenney #ifdef CONFIG_PROVE_RCU
262db1466b3SPaul E. McKenney extern int lockdep_tasklist_lock_is_held(void);
263db1466b3SPaul E. McKenney #endif /* #ifdef CONFIG_PROVE_RCU */
264db1466b3SPaul E. McKenney 
2651da177e4SLinus Torvalds extern void sched_init(void);
2661da177e4SLinus Torvalds extern void sched_init_smp(void);
2672d07b255SHarvey Harrison extern asmlinkage void schedule_tail(struct task_struct *prev);
26836c8b586SIngo Molnar extern void init_idle(struct task_struct *idle, int cpu);
2691df21055SIngo Molnar extern void init_idle_bootup_task(struct task_struct *idle);
2701da177e4SLinus Torvalds 
27189f19f04SAndrew Morton extern int runqueue_is_locked(int cpu);
272017730c1SIngo Molnar 
27346cb4b7cSSiddha, Suresh B #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
27483cd4fe2SVenkatesh Pallipadi extern void select_nohz_load_balancer(int stop_tick);
27569e1e811SSuresh Siddha extern void set_cpu_sd_state_idle(void);
27683cd4fe2SVenkatesh Pallipadi extern int get_nohz_timer_target(void);
27746cb4b7cSSiddha, Suresh B #else
27883cd4fe2SVenkatesh Pallipadi static inline void select_nohz_load_balancer(int stop_tick) { }
279fdaabd80SPeter Zijlstra static inline void set_cpu_sd_state_idle(void) { }
28046cb4b7cSSiddha, Suresh B #endif
2811da177e4SLinus Torvalds 
282e59e2ae2SIngo Molnar /*
28339bc89fdSIngo Molnar  * Only dump TASK_* tasks. (0 for all tasks)
284e59e2ae2SIngo Molnar  */
285e59e2ae2SIngo Molnar extern void show_state_filter(unsigned long state_filter);
286e59e2ae2SIngo Molnar 
287e59e2ae2SIngo Molnar static inline void show_state(void)
288e59e2ae2SIngo Molnar {
28939bc89fdSIngo Molnar 	show_state_filter(0);
290e59e2ae2SIngo Molnar }
291e59e2ae2SIngo Molnar 
2921da177e4SLinus Torvalds extern void show_regs(struct pt_regs *);
2931da177e4SLinus Torvalds 
2941da177e4SLinus Torvalds /*
2951da177e4SLinus Torvalds  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
2961da177e4SLinus Torvalds  * task), SP is the stack pointer of the first frame that should be shown in the back
2971da177e4SLinus Torvalds  * trace (or NULL if the entire call-chain of the task should be shown).
2981da177e4SLinus Torvalds  */
2991da177e4SLinus Torvalds extern void show_stack(struct task_struct *task, unsigned long *sp);
3001da177e4SLinus Torvalds 
3011da177e4SLinus Torvalds void io_schedule(void);
3021da177e4SLinus Torvalds long io_schedule_timeout(long timeout);
3031da177e4SLinus Torvalds 
3041da177e4SLinus Torvalds extern void cpu_init (void);
3051da177e4SLinus Torvalds extern void trap_init(void);
3061da177e4SLinus Torvalds extern void update_process_times(int user);
3071da177e4SLinus Torvalds extern void scheduler_tick(void);
3081da177e4SLinus Torvalds 
30982a1fcb9SIngo Molnar extern void sched_show_task(struct task_struct *p);
31082a1fcb9SIngo Molnar 
31119cc36c0SFrederic Weisbecker #ifdef CONFIG_LOCKUP_DETECTOR
3128446f1d3SIngo Molnar extern void touch_softlockup_watchdog(void);
313d6ad3e28SJason Wessel extern void touch_softlockup_watchdog_sync(void);
31404c9167fSJeremy Fitzhardinge extern void touch_all_softlockup_watchdogs(void);
315332fbdbcSDon Zickus extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
3168d65af78SAlexey Dobriyan 				  void __user *buffer,
317baf48f65SMandeep Singh Baines 				  size_t *lenp, loff_t *ppos);
3189c44bc03SIngo Molnar extern unsigned int  softlockup_panic;
319004417a6SPeter Zijlstra void lockup_detector_init(void);
3208446f1d3SIngo Molnar #else
3218446f1d3SIngo Molnar static inline void touch_softlockup_watchdog(void)
3228446f1d3SIngo Molnar {
3238446f1d3SIngo Molnar }
324d6ad3e28SJason Wessel static inline void touch_softlockup_watchdog_sync(void)
325d6ad3e28SJason Wessel {
326d6ad3e28SJason Wessel }
32704c9167fSJeremy Fitzhardinge static inline void touch_all_softlockup_watchdogs(void)
32804c9167fSJeremy Fitzhardinge {
32904c9167fSJeremy Fitzhardinge }
330004417a6SPeter Zijlstra static inline void lockup_detector_init(void)
331004417a6SPeter Zijlstra {
332004417a6SPeter Zijlstra }
3338446f1d3SIngo Molnar #endif
3348446f1d3SIngo Molnar 
335e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
336e162b39aSMandeep Singh Baines extern unsigned int  sysctl_hung_task_panic;
337e162b39aSMandeep Singh Baines extern unsigned long sysctl_hung_task_check_count;
338e162b39aSMandeep Singh Baines extern unsigned long sysctl_hung_task_timeout_secs;
339e162b39aSMandeep Singh Baines extern unsigned long sysctl_hung_task_warnings;
340e162b39aSMandeep Singh Baines extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
3418d65af78SAlexey Dobriyan 					 void __user *buffer,
342e162b39aSMandeep Singh Baines 					 size_t *lenp, loff_t *ppos);
343e4ecda1bSMark Lord #else
344e4ecda1bSMark Lord /* Avoid need for ifdefs elsewhere in the code */
345e4ecda1bSMark Lord enum { sysctl_hung_task_timeout_secs = 0 };
346e162b39aSMandeep Singh Baines #endif
3478446f1d3SIngo Molnar 
3481da177e4SLinus Torvalds /* Attach to any functions which should be ignored in wchan output. */
3491da177e4SLinus Torvalds #define __sched		__attribute__((__section__(".sched.text")))
350deaf2227SIngo Molnar 
351deaf2227SIngo Molnar /* Linker adds these: start and end of __sched functions */
352deaf2227SIngo Molnar extern char __sched_text_start[], __sched_text_end[];
353deaf2227SIngo Molnar 
3541da177e4SLinus Torvalds /* Is this address in the __sched functions? */
3551da177e4SLinus Torvalds extern int in_sched_functions(unsigned long addr);
3561da177e4SLinus Torvalds 
3571da177e4SLinus Torvalds #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
358b3c97528SHarvey Harrison extern signed long schedule_timeout(signed long timeout);
35964ed93a2SNishanth Aravamudan extern signed long schedule_timeout_interruptible(signed long timeout);
360294d5cc2SMatthew Wilcox extern signed long schedule_timeout_killable(signed long timeout);
36164ed93a2SNishanth Aravamudan extern signed long schedule_timeout_uninterruptible(signed long timeout);
3621da177e4SLinus Torvalds asmlinkage void schedule(void);
363c5491ea7SThomas Gleixner extern void schedule_preempt_disabled(void);
364c6eb3ddaSPeter Zijlstra extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
3651da177e4SLinus Torvalds 
366ab516013SSerge E. Hallyn struct nsproxy;
367acce292cSCedric Le Goater struct user_namespace;
3681da177e4SLinus Torvalds 
369341c87bfSKAMEZAWA Hiroyuki /*
370341c87bfSKAMEZAWA Hiroyuki  * Default maximum number of active map areas, this limits the number of vmas
371341c87bfSKAMEZAWA Hiroyuki  * per mm struct. Users can overwrite this number by sysctl but there is a
372341c87bfSKAMEZAWA Hiroyuki  * problem.
373341c87bfSKAMEZAWA Hiroyuki  *
374341c87bfSKAMEZAWA Hiroyuki  * When a program's coredump is generated as ELF format, a section is created
375341c87bfSKAMEZAWA Hiroyuki  * per a vma. In ELF, the number of sections is represented in unsigned short.
376341c87bfSKAMEZAWA Hiroyuki  * This means the number of sections should be smaller than 65535 at coredump.
377341c87bfSKAMEZAWA Hiroyuki  * Because the kernel adds some informative sections to a image of program at
378341c87bfSKAMEZAWA Hiroyuki  * generating coredump, we need some margin. The number of extra sections is
379341c87bfSKAMEZAWA Hiroyuki  * 1-3 now and depends on arch. We use "5" as safe margin, here.
380341c87bfSKAMEZAWA Hiroyuki  */
381341c87bfSKAMEZAWA Hiroyuki #define MAPCOUNT_ELF_CORE_MARGIN	(5)
3824be929beSAlexey Dobriyan #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
3831da177e4SLinus Torvalds 
3841da177e4SLinus Torvalds extern int sysctl_max_map_count;
3851da177e4SLinus Torvalds 
3861da177e4SLinus Torvalds #include <linux/aio.h>
3871da177e4SLinus Torvalds 
388efc1a3b1SDavid Howells #ifdef CONFIG_MMU
389efc1a3b1SDavid Howells extern void arch_pick_mmap_layout(struct mm_struct *mm);
3901da177e4SLinus Torvalds extern unsigned long
3911da177e4SLinus Torvalds arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
3921da177e4SLinus Torvalds 		       unsigned long, unsigned long);
3931da177e4SLinus Torvalds extern unsigned long
3941da177e4SLinus Torvalds arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
3951da177e4SLinus Torvalds 			  unsigned long len, unsigned long pgoff,
3961da177e4SLinus Torvalds 			  unsigned long flags);
3971363c3cdSWolfgang Wander extern void arch_unmap_area(struct mm_struct *, unsigned long);
3981363c3cdSWolfgang Wander extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
399efc1a3b1SDavid Howells #else
400efc1a3b1SDavid Howells static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
401efc1a3b1SDavid Howells #endif
4021da177e4SLinus Torvalds 
403901608d9SOleg Nesterov 
4046c5d5238SKawai, Hidehiro extern void set_dumpable(struct mm_struct *mm, int value);
4056c5d5238SKawai, Hidehiro extern int get_dumpable(struct mm_struct *mm);
4066c5d5238SKawai, Hidehiro 
4076c5d5238SKawai, Hidehiro /* mm flags */
4083cb4a0bbSKawai, Hidehiro /* dumpable bits */
4096c5d5238SKawai, Hidehiro #define MMF_DUMPABLE      0  /* core dump is permitted */
4106c5d5238SKawai, Hidehiro #define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
411f8af4da3SHugh Dickins 
4123cb4a0bbSKawai, Hidehiro #define MMF_DUMPABLE_BITS 2
413f8af4da3SHugh Dickins #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
4143cb4a0bbSKawai, Hidehiro 
4153cb4a0bbSKawai, Hidehiro /* coredump filter bits */
4163cb4a0bbSKawai, Hidehiro #define MMF_DUMP_ANON_PRIVATE	2
4173cb4a0bbSKawai, Hidehiro #define MMF_DUMP_ANON_SHARED	3
4183cb4a0bbSKawai, Hidehiro #define MMF_DUMP_MAPPED_PRIVATE	4
4193cb4a0bbSKawai, Hidehiro #define MMF_DUMP_MAPPED_SHARED	5
42082df3973SRoland McGrath #define MMF_DUMP_ELF_HEADERS	6
421e575f111SKOSAKI Motohiro #define MMF_DUMP_HUGETLB_PRIVATE 7
422e575f111SKOSAKI Motohiro #define MMF_DUMP_HUGETLB_SHARED  8
423f8af4da3SHugh Dickins 
4243cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
425e575f111SKOSAKI Motohiro #define MMF_DUMP_FILTER_BITS	7
4263cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_MASK \
4273cb4a0bbSKawai, Hidehiro 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
4283cb4a0bbSKawai, Hidehiro #define MMF_DUMP_FILTER_DEFAULT \
429e575f111SKOSAKI Motohiro 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
430656eb2cdSRoland McGrath 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
431656eb2cdSRoland McGrath 
432656eb2cdSRoland McGrath #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
433656eb2cdSRoland McGrath # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
434656eb2cdSRoland McGrath #else
435656eb2cdSRoland McGrath # define MMF_DUMP_MASK_DEFAULT_ELF	0
436656eb2cdSRoland McGrath #endif
437f8af4da3SHugh Dickins 					/* leave room for more dump flags */
438f8af4da3SHugh Dickins #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
439ba76149fSAndrea Arcangeli #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
440f8af4da3SHugh Dickins 
441f8af4da3SHugh Dickins #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
4426c5d5238SKawai, Hidehiro 
4431da177e4SLinus Torvalds struct sighand_struct {
4441da177e4SLinus Torvalds 	atomic_t		count;
4451da177e4SLinus Torvalds 	struct k_sigaction	action[_NSIG];
4461da177e4SLinus Torvalds 	spinlock_t		siglock;
447b8fceee1SDavide Libenzi 	wait_queue_head_t	signalfd_wqh;
4481da177e4SLinus Torvalds };
4491da177e4SLinus Torvalds 
4500e464814SKaiGai Kohei struct pacct_struct {
451f6ec29a4SKaiGai Kohei 	int			ac_flag;
452f6ec29a4SKaiGai Kohei 	long			ac_exitcode;
4530e464814SKaiGai Kohei 	unsigned long		ac_mem;
45477787bfbSKaiGai Kohei 	cputime_t		ac_utime, ac_stime;
45577787bfbSKaiGai Kohei 	unsigned long		ac_minflt, ac_majflt;
4560e464814SKaiGai Kohei };
4570e464814SKaiGai Kohei 
45842c4ab41SStanislaw Gruszka struct cpu_itimer {
45942c4ab41SStanislaw Gruszka 	cputime_t expires;
46042c4ab41SStanislaw Gruszka 	cputime_t incr;
4618356b5f9SStanislaw Gruszka 	u32 error;
4628356b5f9SStanislaw Gruszka 	u32 incr_error;
46342c4ab41SStanislaw Gruszka };
46442c4ab41SStanislaw Gruszka 
465f06febc9SFrank Mayhar /**
466f06febc9SFrank Mayhar  * struct task_cputime - collected CPU time counts
467f06febc9SFrank Mayhar  * @utime:		time spent in user mode, in &cputime_t units
468f06febc9SFrank Mayhar  * @stime:		time spent in kernel mode, in &cputime_t units
469f06febc9SFrank Mayhar  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
470f06febc9SFrank Mayhar  *
471f06febc9SFrank Mayhar  * This structure groups together three kinds of CPU time that are
472f06febc9SFrank Mayhar  * tracked for threads and thread groups.  Most things considering
473f06febc9SFrank Mayhar  * CPU time want to group these counts together and treat all three
474f06febc9SFrank Mayhar  * of them in parallel.
475f06febc9SFrank Mayhar  */
476f06febc9SFrank Mayhar struct task_cputime {
477f06febc9SFrank Mayhar 	cputime_t utime;
478f06febc9SFrank Mayhar 	cputime_t stime;
479f06febc9SFrank Mayhar 	unsigned long long sum_exec_runtime;
480f06febc9SFrank Mayhar };
481f06febc9SFrank Mayhar /* Alternate field names when used to cache expirations. */
482f06febc9SFrank Mayhar #define prof_exp	stime
483f06febc9SFrank Mayhar #define virt_exp	utime
484f06febc9SFrank Mayhar #define sched_exp	sum_exec_runtime
485f06febc9SFrank Mayhar 
4864cd4c1b4SPeter Zijlstra #define INIT_CPUTIME	\
4874cd4c1b4SPeter Zijlstra 	(struct task_cputime) {					\
48864861634SMartin Schwidefsky 		.utime = 0,					\
48964861634SMartin Schwidefsky 		.stime = 0,					\
4904cd4c1b4SPeter Zijlstra 		.sum_exec_runtime = 0,				\
4914cd4c1b4SPeter Zijlstra 	}
4924cd4c1b4SPeter Zijlstra 
493c99e6efeSPeter Zijlstra /*
494c99e6efeSPeter Zijlstra  * Disable preemption until the scheduler is running.
495c99e6efeSPeter Zijlstra  * Reset by start_kernel()->sched_init()->init_idle().
496d86ee480SPeter Zijlstra  *
497d86ee480SPeter Zijlstra  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
498d86ee480SPeter Zijlstra  * before the scheduler is active -- see should_resched().
499c99e6efeSPeter Zijlstra  */
500d86ee480SPeter Zijlstra #define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
501c99e6efeSPeter Zijlstra 
502f06febc9SFrank Mayhar /**
5034cd4c1b4SPeter Zijlstra  * struct thread_group_cputimer - thread group interval timer counts
5044cd4c1b4SPeter Zijlstra  * @cputime:		thread group interval timers.
5054cd4c1b4SPeter Zijlstra  * @running:		non-zero when there are timers running and
5064cd4c1b4SPeter Zijlstra  * 			@cputime receives updates.
5074cd4c1b4SPeter Zijlstra  * @lock:		lock for fields in this struct.
508f06febc9SFrank Mayhar  *
509f06febc9SFrank Mayhar  * This structure contains the version of task_cputime, above, that is
5104cd4c1b4SPeter Zijlstra  * used for thread group CPU timer calculations.
511f06febc9SFrank Mayhar  */
5124cd4c1b4SPeter Zijlstra struct thread_group_cputimer {
5134cd4c1b4SPeter Zijlstra 	struct task_cputime cputime;
5144cd4c1b4SPeter Zijlstra 	int running;
515ee30a7b2SThomas Gleixner 	raw_spinlock_t lock;
516f06febc9SFrank Mayhar };
517f06febc9SFrank Mayhar 
5184714d1d3SBen Blum #include <linux/rwsem.h>
5195091faa4SMike Galbraith struct autogroup;
5205091faa4SMike Galbraith 
5211da177e4SLinus Torvalds /*
522e815f0a8SJonathan Neuschäfer  * NOTE! "signal_struct" does not have its own
5231da177e4SLinus Torvalds  * locking, because a shared signal_struct always
5241da177e4SLinus Torvalds  * implies a shared sighand_struct, so locking
5251da177e4SLinus Torvalds  * sighand_struct is always a proper superset of
5261da177e4SLinus Torvalds  * the locking of signal_struct.
5271da177e4SLinus Torvalds  */
5281da177e4SLinus Torvalds struct signal_struct {
529ea6d290cSOleg Nesterov 	atomic_t		sigcnt;
5301da177e4SLinus Torvalds 	atomic_t		live;
531b3ac022cSOleg Nesterov 	int			nr_threads;
5321da177e4SLinus Torvalds 
5331da177e4SLinus Torvalds 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
5341da177e4SLinus Torvalds 
5351da177e4SLinus Torvalds 	/* current thread group signal load-balancing target: */
53636c8b586SIngo Molnar 	struct task_struct	*curr_target;
5371da177e4SLinus Torvalds 
5381da177e4SLinus Torvalds 	/* shared signal handling: */
5391da177e4SLinus Torvalds 	struct sigpending	shared_pending;
5401da177e4SLinus Torvalds 
5411da177e4SLinus Torvalds 	/* thread group exit support */
5421da177e4SLinus Torvalds 	int			group_exit_code;
5431da177e4SLinus Torvalds 	/* overloaded:
5441da177e4SLinus Torvalds 	 * - notify group_exit_task when ->count is equal to notify_count
5451da177e4SLinus Torvalds 	 * - everyone except group_exit_task is stopped during signal delivery
5461da177e4SLinus Torvalds 	 *   of fatal signals, group_exit_task processes the signal.
5471da177e4SLinus Torvalds 	 */
5481da177e4SLinus Torvalds 	int			notify_count;
54907dd20e0SRichard Kennedy 	struct task_struct	*group_exit_task;
5501da177e4SLinus Torvalds 
5511da177e4SLinus Torvalds 	/* thread group stop support, overloads group_exit_code too */
5521da177e4SLinus Torvalds 	int			group_stop_count;
5531da177e4SLinus Torvalds 	unsigned int		flags; /* see SIGNAL_* flags below */
5541da177e4SLinus Torvalds 
555ebec18a6SLennart Poettering 	/*
556ebec18a6SLennart Poettering 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
557ebec18a6SLennart Poettering 	 * manager, to re-parent orphan (double-forking) child processes
558ebec18a6SLennart Poettering 	 * to this process instead of 'init'. The service manager is
559ebec18a6SLennart Poettering 	 * able to receive SIGCHLD signals and is able to investigate
560ebec18a6SLennart Poettering 	 * the process until it calls wait(). All children of this
561ebec18a6SLennart Poettering 	 * process will inherit a flag if they should look for a
562ebec18a6SLennart Poettering 	 * child_subreaper process at exit.
563ebec18a6SLennart Poettering 	 */
564ebec18a6SLennart Poettering 	unsigned int		is_child_subreaper:1;
565ebec18a6SLennart Poettering 	unsigned int		has_child_subreaper:1;
566ebec18a6SLennart Poettering 
5671da177e4SLinus Torvalds 	/* POSIX.1b Interval Timers */
5681da177e4SLinus Torvalds 	struct list_head posix_timers;
5691da177e4SLinus Torvalds 
5701da177e4SLinus Torvalds 	/* ITIMER_REAL timer for the process */
5712ff678b8SThomas Gleixner 	struct hrtimer real_timer;
572fea9d175SOleg Nesterov 	struct pid *leader_pid;
5732ff678b8SThomas Gleixner 	ktime_t it_real_incr;
5741da177e4SLinus Torvalds 
57542c4ab41SStanislaw Gruszka 	/*
57642c4ab41SStanislaw Gruszka 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
57742c4ab41SStanislaw Gruszka 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
57842c4ab41SStanislaw Gruszka 	 * values are defined to 0 and 1 respectively
57942c4ab41SStanislaw Gruszka 	 */
58042c4ab41SStanislaw Gruszka 	struct cpu_itimer it[2];
5811da177e4SLinus Torvalds 
582f06febc9SFrank Mayhar 	/*
5834cd4c1b4SPeter Zijlstra 	 * Thread group totals for process CPU timers.
5844cd4c1b4SPeter Zijlstra 	 * See thread_group_cputimer(), et al, for details.
585f06febc9SFrank Mayhar 	 */
5864cd4c1b4SPeter Zijlstra 	struct thread_group_cputimer cputimer;
587f06febc9SFrank Mayhar 
588f06febc9SFrank Mayhar 	/* Earliest-expiration cache. */
589f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
590f06febc9SFrank Mayhar 
591f06febc9SFrank Mayhar 	struct list_head cpu_timers[3];
592f06febc9SFrank Mayhar 
593ab521dc0SEric W. Biederman 	struct pid *tty_old_pgrp;
5941ec320afSCedric Le Goater 
5951da177e4SLinus Torvalds 	/* boolean value for session group leader */
5961da177e4SLinus Torvalds 	int leader;
5971da177e4SLinus Torvalds 
5981da177e4SLinus Torvalds 	struct tty_struct *tty; /* NULL if no tty */
5991da177e4SLinus Torvalds 
6005091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
6015091faa4SMike Galbraith 	struct autogroup *autogroup;
6025091faa4SMike Galbraith #endif
6031da177e4SLinus Torvalds 	/*
6041da177e4SLinus Torvalds 	 * Cumulative resource counters for dead threads in the group,
6051da177e4SLinus Torvalds 	 * and for reaped dead child processes forked by this group.
6061da177e4SLinus Torvalds 	 * Live threads maintain their own counters and add to these
6071da177e4SLinus Torvalds 	 * in __exit_signal, except for the group leader.
6081da177e4SLinus Torvalds 	 */
60932bd671dSPeter Zijlstra 	cputime_t utime, stime, cutime, cstime;
6109ac52315SLaurent Vivier 	cputime_t gtime;
6119ac52315SLaurent Vivier 	cputime_t cgtime;
6120cf55e1eSHidetoshi Seto #ifndef CONFIG_VIRT_CPU_ACCOUNTING
6130cf55e1eSHidetoshi Seto 	cputime_t prev_utime, prev_stime;
6140cf55e1eSHidetoshi Seto #endif
6151da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
6161da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
6176eaeeabaSEric Dumazet 	unsigned long inblock, oublock, cinblock, coublock;
6181f10206cSJiri Pirko 	unsigned long maxrss, cmaxrss;
619940389b8SAndrea Righi 	struct task_io_accounting ioac;
6201da177e4SLinus Torvalds 
6211da177e4SLinus Torvalds 	/*
62232bd671dSPeter Zijlstra 	 * Cumulative ns of schedule CPU time fo dead threads in the
62332bd671dSPeter Zijlstra 	 * group, not including a zombie group leader, (This only differs
62432bd671dSPeter Zijlstra 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
62532bd671dSPeter Zijlstra 	 * other than jiffies.)
62632bd671dSPeter Zijlstra 	 */
62732bd671dSPeter Zijlstra 	unsigned long long sum_sched_runtime;
62832bd671dSPeter Zijlstra 
62932bd671dSPeter Zijlstra 	/*
6301da177e4SLinus Torvalds 	 * We don't bother to synchronize most readers of this at all,
6311da177e4SLinus Torvalds 	 * because there is no reader checking a limit that actually needs
6321da177e4SLinus Torvalds 	 * to get both rlim_cur and rlim_max atomically, and either one
6331da177e4SLinus Torvalds 	 * alone is a single word that can safely be read normally.
6341da177e4SLinus Torvalds 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
6351da177e4SLinus Torvalds 	 * protect this instead of the siglock, because they really
6361da177e4SLinus Torvalds 	 * have no need to disable irqs.
6371da177e4SLinus Torvalds 	 */
6381da177e4SLinus Torvalds 	struct rlimit rlim[RLIM_NLIMITS];
6391da177e4SLinus Torvalds 
6400e464814SKaiGai Kohei #ifdef CONFIG_BSD_PROCESS_ACCT
6410e464814SKaiGai Kohei 	struct pacct_struct pacct;	/* per-process accounting information */
6420e464814SKaiGai Kohei #endif
643ad4ecbcbSShailabh Nagar #ifdef CONFIG_TASKSTATS
644ad4ecbcbSShailabh Nagar 	struct taskstats *stats;
645ad4ecbcbSShailabh Nagar #endif
646522ed776SMiloslav Trmac #ifdef CONFIG_AUDIT
647522ed776SMiloslav Trmac 	unsigned audit_tty;
648522ed776SMiloslav Trmac 	struct tty_audit_buf *tty_audit_buf;
649522ed776SMiloslav Trmac #endif
6504714d1d3SBen Blum #ifdef CONFIG_CGROUPS
6514714d1d3SBen Blum 	/*
65277e4ef99STejun Heo 	 * group_rwsem prevents new tasks from entering the threadgroup and
65377e4ef99STejun Heo 	 * member tasks from exiting,a more specifically, setting of
65477e4ef99STejun Heo 	 * PF_EXITING.  fork and exit paths are protected with this rwsem
65577e4ef99STejun Heo 	 * using threadgroup_change_begin/end().  Users which require
65677e4ef99STejun Heo 	 * threadgroup to remain stable should use threadgroup_[un]lock()
65777e4ef99STejun Heo 	 * which also takes care of exec path.  Currently, cgroup is the
65877e4ef99STejun Heo 	 * only user.
6594714d1d3SBen Blum 	 */
660257058aeSTejun Heo 	struct rw_semaphore group_rwsem;
6614714d1d3SBen Blum #endif
66228b83c51SKOSAKI Motohiro 
66328b83c51SKOSAKI Motohiro 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
664a63d83f4SDavid Rientjes 	int oom_score_adj;	/* OOM kill score adjustment */
665dabb16f6SMandeep Singh Baines 	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
666dabb16f6SMandeep Singh Baines 				 * Only settable by CAP_SYS_RESOURCE. */
6679b1bf12dSKOSAKI Motohiro 
6689b1bf12dSKOSAKI Motohiro 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
6699b1bf12dSKOSAKI Motohiro 					 * credential calculations
6709b1bf12dSKOSAKI Motohiro 					 * (notably. ptrace) */
6711da177e4SLinus Torvalds };
6721da177e4SLinus Torvalds 
6734866cde0SNick Piggin /* Context switch must be unlocked if interrupts are to be enabled */
6744866cde0SNick Piggin #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
6754866cde0SNick Piggin # define __ARCH_WANT_UNLOCKED_CTXSW
6764866cde0SNick Piggin #endif
6774866cde0SNick Piggin 
6781da177e4SLinus Torvalds /*
6791da177e4SLinus Torvalds  * Bits in flags field of signal_struct.
6801da177e4SLinus Torvalds  */
6811da177e4SLinus Torvalds #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
682ee77f075SOleg Nesterov #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
683ee77f075SOleg Nesterov #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
684e4420551SOleg Nesterov /*
685e4420551SOleg Nesterov  * Pending notifications to parent.
686e4420551SOleg Nesterov  */
687e4420551SOleg Nesterov #define SIGNAL_CLD_STOPPED	0x00000010
688e4420551SOleg Nesterov #define SIGNAL_CLD_CONTINUED	0x00000020
689e4420551SOleg Nesterov #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
6901da177e4SLinus Torvalds 
691fae5fa44SOleg Nesterov #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
692fae5fa44SOleg Nesterov 
693ed5d2cacSOleg Nesterov /* If true, all threads except ->group_exit_task have pending SIGKILL */
694ed5d2cacSOleg Nesterov static inline int signal_group_exit(const struct signal_struct *sig)
695ed5d2cacSOleg Nesterov {
696ed5d2cacSOleg Nesterov 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
697ed5d2cacSOleg Nesterov 		(sig->group_exit_task != NULL);
698ed5d2cacSOleg Nesterov }
699ed5d2cacSOleg Nesterov 
7001da177e4SLinus Torvalds /*
7011da177e4SLinus Torvalds  * Some day this will be a full-fledged user tracking system..
7021da177e4SLinus Torvalds  */
7031da177e4SLinus Torvalds struct user_struct {
7041da177e4SLinus Torvalds 	atomic_t __count;	/* reference count */
7051da177e4SLinus Torvalds 	atomic_t processes;	/* How many processes does this user have? */
7061da177e4SLinus Torvalds 	atomic_t files;		/* How many open files does this user have? */
7071da177e4SLinus Torvalds 	atomic_t sigpending;	/* How many pending signals does this user have? */
7082d9048e2SAmy Griffis #ifdef CONFIG_INOTIFY_USER
7090eeca283SRobert Love 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
7100eeca283SRobert Love 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
7110eeca283SRobert Love #endif
7124afeff85SEric Paris #ifdef CONFIG_FANOTIFY
7134afeff85SEric Paris 	atomic_t fanotify_listeners;
7144afeff85SEric Paris #endif
7157ef9964eSDavide Libenzi #ifdef CONFIG_EPOLL
71652bd19f7SRobin Holt 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
7177ef9964eSDavide Libenzi #endif
718970a8645SAlexey Dobriyan #ifdef CONFIG_POSIX_MQUEUE
7191da177e4SLinus Torvalds 	/* protected by mq_lock	*/
7201da177e4SLinus Torvalds 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
721970a8645SAlexey Dobriyan #endif
7221da177e4SLinus Torvalds 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
7231da177e4SLinus Torvalds 
7241da177e4SLinus Torvalds #ifdef CONFIG_KEYS
7251da177e4SLinus Torvalds 	struct key *uid_keyring;	/* UID specific keyring */
7261da177e4SLinus Torvalds 	struct key *session_keyring;	/* UID's default session keyring */
7271da177e4SLinus Torvalds #endif
7281da177e4SLinus Torvalds 
7291da177e4SLinus Torvalds 	/* Hash table maintenance information */
730735de223SPavel Emelyanov 	struct hlist_node uidhash_node;
7311da177e4SLinus Torvalds 	uid_t uid;
73218b6e041SSerge Hallyn 	struct user_namespace *user_ns;
73324e377a8SSrivatsa Vaddagiri 
734cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
735789f90fcSPeter Zijlstra 	atomic_long_t locked_vm;
736789f90fcSPeter Zijlstra #endif
7371da177e4SLinus Torvalds };
7381da177e4SLinus Torvalds 
739eb41d946SKay Sievers extern int uids_sysfs_init(void);
7405cb350baSDhaval Giani 
7411da177e4SLinus Torvalds extern struct user_struct *find_user(uid_t);
7421da177e4SLinus Torvalds 
7431da177e4SLinus Torvalds extern struct user_struct root_user;
7441da177e4SLinus Torvalds #define INIT_USER (&root_user)
7451da177e4SLinus Torvalds 
746b6dff3ecSDavid Howells 
7471da177e4SLinus Torvalds struct backing_dev_info;
7481da177e4SLinus Torvalds struct reclaim_state;
7491da177e4SLinus Torvalds 
75052f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
7511da177e4SLinus Torvalds struct sched_info {
7521da177e4SLinus Torvalds 	/* cumulative counters */
7532d72376bSIngo Molnar 	unsigned long pcount;	      /* # of times run on this cpu */
7549c2c4802SKen Chen 	unsigned long long run_delay; /* time spent waiting on a runqueue */
7551da177e4SLinus Torvalds 
7561da177e4SLinus Torvalds 	/* timestamps */
757172ba844SBalbir Singh 	unsigned long long last_arrival,/* when we last ran on a cpu */
7581da177e4SLinus Torvalds 			   last_queued;	/* when we were last queued to run */
7591da177e4SLinus Torvalds };
76052f17b6cSChandra Seetharaman #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
7611da177e4SLinus Torvalds 
762ca74e92bSShailabh Nagar #ifdef CONFIG_TASK_DELAY_ACCT
763ca74e92bSShailabh Nagar struct task_delay_info {
764ca74e92bSShailabh Nagar 	spinlock_t	lock;
765ca74e92bSShailabh Nagar 	unsigned int	flags;	/* Private per-task flags */
766ca74e92bSShailabh Nagar 
767ca74e92bSShailabh Nagar 	/* For each stat XXX, add following, aligned appropriately
768ca74e92bSShailabh Nagar 	 *
769ca74e92bSShailabh Nagar 	 * struct timespec XXX_start, XXX_end;
770ca74e92bSShailabh Nagar 	 * u64 XXX_delay;
771ca74e92bSShailabh Nagar 	 * u32 XXX_count;
772ca74e92bSShailabh Nagar 	 *
773ca74e92bSShailabh Nagar 	 * Atomicity of updates to XXX_delay, XXX_count protected by
774ca74e92bSShailabh Nagar 	 * single lock above (split into XXX_lock if contention is an issue).
775ca74e92bSShailabh Nagar 	 */
7760ff92245SShailabh Nagar 
7770ff92245SShailabh Nagar 	/*
7780ff92245SShailabh Nagar 	 * XXX_count is incremented on every XXX operation, the delay
7790ff92245SShailabh Nagar 	 * associated with the operation is added to XXX_delay.
7800ff92245SShailabh Nagar 	 * XXX_delay contains the accumulated delay time in nanoseconds.
7810ff92245SShailabh Nagar 	 */
7820ff92245SShailabh Nagar 	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
7830ff92245SShailabh Nagar 	u64 blkio_delay;	/* wait for sync block io completion */
7840ff92245SShailabh Nagar 	u64 swapin_delay;	/* wait for swapin block io completion */
7850ff92245SShailabh Nagar 	u32 blkio_count;	/* total count of the number of sync block */
7860ff92245SShailabh Nagar 				/* io operations performed */
7870ff92245SShailabh Nagar 	u32 swapin_count;	/* total count of the number of swapin block */
7880ff92245SShailabh Nagar 				/* io operations performed */
789873b4771SKeika Kobayashi 
790873b4771SKeika Kobayashi 	struct timespec freepages_start, freepages_end;
791873b4771SKeika Kobayashi 	u64 freepages_delay;	/* wait for memory reclaim */
792873b4771SKeika Kobayashi 	u32 freepages_count;	/* total count of memory reclaim */
793ca74e92bSShailabh Nagar };
79452f17b6cSChandra Seetharaman #endif	/* CONFIG_TASK_DELAY_ACCT */
79552f17b6cSChandra Seetharaman 
79652f17b6cSChandra Seetharaman static inline int sched_info_on(void)
79752f17b6cSChandra Seetharaman {
79852f17b6cSChandra Seetharaman #ifdef CONFIG_SCHEDSTATS
79952f17b6cSChandra Seetharaman 	return 1;
80052f17b6cSChandra Seetharaman #elif defined(CONFIG_TASK_DELAY_ACCT)
80152f17b6cSChandra Seetharaman 	extern int delayacct_on;
80252f17b6cSChandra Seetharaman 	return delayacct_on;
80352f17b6cSChandra Seetharaman #else
80452f17b6cSChandra Seetharaman 	return 0;
805ca74e92bSShailabh Nagar #endif
80652f17b6cSChandra Seetharaman }
807ca74e92bSShailabh Nagar 
808d15bcfdbSIngo Molnar enum cpu_idle_type {
809d15bcfdbSIngo Molnar 	CPU_IDLE,
810d15bcfdbSIngo Molnar 	CPU_NOT_IDLE,
811d15bcfdbSIngo Molnar 	CPU_NEWLY_IDLE,
812d15bcfdbSIngo Molnar 	CPU_MAX_IDLE_TYPES
8131da177e4SLinus Torvalds };
8141da177e4SLinus Torvalds 
8151da177e4SLinus Torvalds /*
816c8b28116SNikhil Rao  * Increase resolution of nice-level calculations for 64-bit architectures.
817c8b28116SNikhil Rao  * The extra resolution improves shares distribution and load balancing of
818c8b28116SNikhil Rao  * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
819c8b28116SNikhil Rao  * hierarchies, especially on larger systems. This is not a user-visible change
820c8b28116SNikhil Rao  * and does not change the user-interface for setting shares/weights.
821c8b28116SNikhil Rao  *
822c8b28116SNikhil Rao  * We increase resolution only if we have enough bits to allow this increased
823c8b28116SNikhil Rao  * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
824c8b28116SNikhil Rao  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
825c8b28116SNikhil Rao  * increased costs.
8261da177e4SLinus Torvalds  */
827e4c2fb0dSPeter Zijlstra #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
828c8b28116SNikhil Rao # define SCHED_LOAD_RESOLUTION	10
829c8b28116SNikhil Rao # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
830c8b28116SNikhil Rao # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
831c8b28116SNikhil Rao #else
832c8b28116SNikhil Rao # define SCHED_LOAD_RESOLUTION	0
833c8b28116SNikhil Rao # define scale_load(w)		(w)
834c8b28116SNikhil Rao # define scale_load_down(w)	(w)
835c8b28116SNikhil Rao #endif
8369aa7b369SIngo Molnar 
837c8b28116SNikhil Rao #define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
8389aa7b369SIngo Molnar #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
8399aa7b369SIngo Molnar 
8401399fa78SNikhil Rao /*
8411399fa78SNikhil Rao  * Increase resolution of cpu_power calculations
8421399fa78SNikhil Rao  */
8431399fa78SNikhil Rao #define SCHED_POWER_SHIFT	10
8441399fa78SNikhil Rao #define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
8451da177e4SLinus Torvalds 
8461399fa78SNikhil Rao /*
8471399fa78SNikhil Rao  * sched-domains (multiprocessor balancing) declarations:
8481399fa78SNikhil Rao  */
8492dd73a4fSPeter Williams #ifdef CONFIG_SMP
850b5d978e0SPeter Zijlstra #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
851b5d978e0SPeter Zijlstra #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
852b5d978e0SPeter Zijlstra #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
853b5d978e0SPeter Zijlstra #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
854c88d5910SPeter Zijlstra #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
855b5d978e0SPeter Zijlstra #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
85659abf026SPeter Zijlstra #define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
857b5d978e0SPeter Zijlstra #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
858b5d978e0SPeter Zijlstra #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
859b5d978e0SPeter Zijlstra #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
860b5d978e0SPeter Zijlstra #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
861532cb4c4SMichael Neuling #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
862b5d978e0SPeter Zijlstra #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
863e3589f6cSPeter Zijlstra #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
8645c45bf27SSiddha, Suresh B 
865afb8a9b7SGautham R Shenoy enum powersavings_balance_level {
866afb8a9b7SGautham R Shenoy 	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
867afb8a9b7SGautham R Shenoy 	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
868afb8a9b7SGautham R Shenoy 					 * first for long running threads
869afb8a9b7SGautham R Shenoy 					 */
870afb8a9b7SGautham R Shenoy 	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
871afb8a9b7SGautham R Shenoy 					 * cpu package for power savings
872afb8a9b7SGautham R Shenoy 					 */
873afb8a9b7SGautham R Shenoy 	MAX_POWERSAVINGS_BALANCE_LEVELS
874afb8a9b7SGautham R Shenoy };
87589c4710eSSiddha, Suresh B 
876716707b2SVaidyanathan Srinivasan extern int sched_mc_power_savings, sched_smt_power_savings;
87789c4710eSSiddha, Suresh B 
878716707b2SVaidyanathan Srinivasan static inline int sd_balance_for_mc_power(void)
879716707b2SVaidyanathan Srinivasan {
880716707b2SVaidyanathan Srinivasan 	if (sched_smt_power_savings)
881716707b2SVaidyanathan Srinivasan 		return SD_POWERSAVINGS_BALANCE;
8825c45bf27SSiddha, Suresh B 
88328f53181SVaidyanathan Srinivasan 	if (!sched_mc_power_savings)
884b5d978e0SPeter Zijlstra 		return SD_PREFER_SIBLING;
88528f53181SVaidyanathan Srinivasan 
88628f53181SVaidyanathan Srinivasan 	return 0;
887716707b2SVaidyanathan Srinivasan }
888716707b2SVaidyanathan Srinivasan 
889716707b2SVaidyanathan Srinivasan static inline int sd_balance_for_package_power(void)
890716707b2SVaidyanathan Srinivasan {
891716707b2SVaidyanathan Srinivasan 	if (sched_mc_power_savings | sched_smt_power_savings)
892716707b2SVaidyanathan Srinivasan 		return SD_POWERSAVINGS_BALANCE;
893716707b2SVaidyanathan Srinivasan 
894b5d978e0SPeter Zijlstra 	return SD_PREFER_SIBLING;
895716707b2SVaidyanathan Srinivasan }
896147cbb4bSNick Piggin 
897532cb4c4SMichael Neuling extern int __weak arch_sd_sibiling_asym_packing(void);
898532cb4c4SMichael Neuling 
899100fdaeeSVaidyanathan Srinivasan /*
900100fdaeeSVaidyanathan Srinivasan  * Optimise SD flags for power savings:
90125985edcSLucas De Marchi  * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
902100fdaeeSVaidyanathan Srinivasan  * Keep default SD flags if sched_{smt,mc}_power_saving=0
903100fdaeeSVaidyanathan Srinivasan  */
904100fdaeeSVaidyanathan Srinivasan 
905100fdaeeSVaidyanathan Srinivasan static inline int sd_power_saving_flags(void)
906100fdaeeSVaidyanathan Srinivasan {
907100fdaeeSVaidyanathan Srinivasan 	if (sched_mc_power_savings | sched_smt_power_savings)
908100fdaeeSVaidyanathan Srinivasan 		return SD_BALANCE_NEWIDLE;
909100fdaeeSVaidyanathan Srinivasan 
910100fdaeeSVaidyanathan Srinivasan 	return 0;
911100fdaeeSVaidyanathan Srinivasan }
9121da177e4SLinus Torvalds 
9139c3f75cbSPeter Zijlstra struct sched_group_power {
914e3589f6cSPeter Zijlstra 	atomic_t ref;
9151da177e4SLinus Torvalds 	/*
9161da177e4SLinus Torvalds 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
91718a3885fSPeter Zijlstra 	 * single CPU.
9181da177e4SLinus Torvalds 	 */
9199c3f75cbSPeter Zijlstra 	unsigned int power, power_orig;
9204ec4412eSVincent Guittot 	unsigned long next_update;
92169e1e811SSuresh Siddha 	/*
92269e1e811SSuresh Siddha 	 * Number of busy cpus in this group.
92369e1e811SSuresh Siddha 	 */
92469e1e811SSuresh Siddha 	atomic_t nr_busy_cpus;
9259c3f75cbSPeter Zijlstra };
9269c3f75cbSPeter Zijlstra 
9279c3f75cbSPeter Zijlstra struct sched_group {
9289c3f75cbSPeter Zijlstra 	struct sched_group *next;	/* Must be a circular list */
9299c3f75cbSPeter Zijlstra 	atomic_t ref;
9309c3f75cbSPeter Zijlstra 
931aae6d3ddSSuresh Siddha 	unsigned int group_weight;
9329c3f75cbSPeter Zijlstra 	struct sched_group_power *sgp;
9336c99e9adSRusty Russell 
9344200efd9SIngo Molnar 	/*
9354200efd9SIngo Molnar 	 * The CPUs this group covers.
9364200efd9SIngo Molnar 	 *
9374200efd9SIngo Molnar 	 * NOTE: this field is variable length. (Allocated dynamically
9384200efd9SIngo Molnar 	 * by attaching extra space to the end of the structure,
9394200efd9SIngo Molnar 	 * depending on how many CPUs the kernel has booted up with)
9404200efd9SIngo Molnar 	 */
9414200efd9SIngo Molnar 	unsigned long cpumask[0];
9421da177e4SLinus Torvalds };
9431da177e4SLinus Torvalds 
944758b2cdcSRusty Russell static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
945758b2cdcSRusty Russell {
9466c99e9adSRusty Russell 	return to_cpumask(sg->cpumask);
947758b2cdcSRusty Russell }
948758b2cdcSRusty Russell 
949029632fbSPeter Zijlstra /**
950029632fbSPeter Zijlstra  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
951029632fbSPeter Zijlstra  * @group: The group whose first cpu is to be returned.
952029632fbSPeter Zijlstra  */
953029632fbSPeter Zijlstra static inline unsigned int group_first_cpu(struct sched_group *group)
954029632fbSPeter Zijlstra {
955029632fbSPeter Zijlstra 	return cpumask_first(sched_group_cpus(group));
956029632fbSPeter Zijlstra }
957029632fbSPeter Zijlstra 
9581d3504fcSHidetoshi Seto struct sched_domain_attr {
9591d3504fcSHidetoshi Seto 	int relax_domain_level;
9601d3504fcSHidetoshi Seto };
9611d3504fcSHidetoshi Seto 
9621d3504fcSHidetoshi Seto #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
9631d3504fcSHidetoshi Seto 	.relax_domain_level = -1,			\
9641d3504fcSHidetoshi Seto }
9651d3504fcSHidetoshi Seto 
96660495e77SPeter Zijlstra extern int sched_domain_level_max;
96760495e77SPeter Zijlstra 
9681da177e4SLinus Torvalds struct sched_domain {
9691da177e4SLinus Torvalds 	/* These fields must be setup */
9701da177e4SLinus Torvalds 	struct sched_domain *parent;	/* top domain must be null terminated */
9711a848870SSiddha, Suresh B 	struct sched_domain *child;	/* bottom domain must be null terminated */
9721da177e4SLinus Torvalds 	struct sched_group *groups;	/* the balancing groups of the domain */
9731da177e4SLinus Torvalds 	unsigned long min_interval;	/* Minimum balance interval ms */
9741da177e4SLinus Torvalds 	unsigned long max_interval;	/* Maximum balance interval ms */
9751da177e4SLinus Torvalds 	unsigned int busy_factor;	/* less balancing by factor if busy */
9761da177e4SLinus Torvalds 	unsigned int imbalance_pct;	/* No balance until over watermark */
9771da177e4SLinus Torvalds 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
9787897986bSNick Piggin 	unsigned int busy_idx;
9797897986bSNick Piggin 	unsigned int idle_idx;
9807897986bSNick Piggin 	unsigned int newidle_idx;
9817897986bSNick Piggin 	unsigned int wake_idx;
982147cbb4bSNick Piggin 	unsigned int forkexec_idx;
983a52bfd73SPeter Zijlstra 	unsigned int smt_gain;
9841da177e4SLinus Torvalds 	int flags;			/* See SD_* */
98560495e77SPeter Zijlstra 	int level;
9861da177e4SLinus Torvalds 
9871da177e4SLinus Torvalds 	/* Runtime fields. */
9881da177e4SLinus Torvalds 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
9891da177e4SLinus Torvalds 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
9901da177e4SLinus Torvalds 	unsigned int nr_balance_failed; /* initialise to 0 */
9911da177e4SLinus Torvalds 
9922398f2c6SPeter Zijlstra 	u64 last_update;
9932398f2c6SPeter Zijlstra 
9941da177e4SLinus Torvalds #ifdef CONFIG_SCHEDSTATS
9951da177e4SLinus Torvalds 	/* load_balance() stats */
996480b9434SKen Chen 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
997480b9434SKen Chen 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
998480b9434SKen Chen 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
999480b9434SKen Chen 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1000480b9434SKen Chen 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1001480b9434SKen Chen 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1002480b9434SKen Chen 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1003480b9434SKen Chen 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
10041da177e4SLinus Torvalds 
10051da177e4SLinus Torvalds 	/* Active load balancing */
1006480b9434SKen Chen 	unsigned int alb_count;
1007480b9434SKen Chen 	unsigned int alb_failed;
1008480b9434SKen Chen 	unsigned int alb_pushed;
10091da177e4SLinus Torvalds 
101068767a0aSNick Piggin 	/* SD_BALANCE_EXEC stats */
1011480b9434SKen Chen 	unsigned int sbe_count;
1012480b9434SKen Chen 	unsigned int sbe_balanced;
1013480b9434SKen Chen 	unsigned int sbe_pushed;
10141da177e4SLinus Torvalds 
101568767a0aSNick Piggin 	/* SD_BALANCE_FORK stats */
1016480b9434SKen Chen 	unsigned int sbf_count;
1017480b9434SKen Chen 	unsigned int sbf_balanced;
1018480b9434SKen Chen 	unsigned int sbf_pushed;
101968767a0aSNick Piggin 
10201da177e4SLinus Torvalds 	/* try_to_wake_up() stats */
1021480b9434SKen Chen 	unsigned int ttwu_wake_remote;
1022480b9434SKen Chen 	unsigned int ttwu_move_affine;
1023480b9434SKen Chen 	unsigned int ttwu_move_balance;
10241da177e4SLinus Torvalds #endif
1025a5d8c348SIngo Molnar #ifdef CONFIG_SCHED_DEBUG
1026a5d8c348SIngo Molnar 	char *name;
1027a5d8c348SIngo Molnar #endif
1028dce840a0SPeter Zijlstra 	union {
1029dce840a0SPeter Zijlstra 		void *private;		/* used during construction */
1030dce840a0SPeter Zijlstra 		struct rcu_head rcu;	/* used during destruction */
1031dce840a0SPeter Zijlstra 	};
10326c99e9adSRusty Russell 
1033669c55e9SPeter Zijlstra 	unsigned int span_weight;
10344200efd9SIngo Molnar 	/*
10354200efd9SIngo Molnar 	 * Span of all CPUs in this domain.
10364200efd9SIngo Molnar 	 *
10374200efd9SIngo Molnar 	 * NOTE: this field is variable length. (Allocated dynamically
10384200efd9SIngo Molnar 	 * by attaching extra space to the end of the structure,
10394200efd9SIngo Molnar 	 * depending on how many CPUs the kernel has booted up with)
10404200efd9SIngo Molnar 	 */
10414200efd9SIngo Molnar 	unsigned long span[0];
10421da177e4SLinus Torvalds };
10431da177e4SLinus Torvalds 
1044758b2cdcSRusty Russell static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1045758b2cdcSRusty Russell {
10466c99e9adSRusty Russell 	return to_cpumask(sd->span);
1047758b2cdcSRusty Russell }
1048758b2cdcSRusty Russell 
1049acc3f5d7SRusty Russell extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
10501d3504fcSHidetoshi Seto 				    struct sched_domain_attr *dattr_new);
1051029190c5SPaul Jackson 
1052acc3f5d7SRusty Russell /* Allocate an array of sched domains, for partition_sched_domains(). */
1053acc3f5d7SRusty Russell cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1054acc3f5d7SRusty Russell void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1055acc3f5d7SRusty Russell 
105606aaf76aSIngo Molnar /* Test a flag in parent sched domain */
105706aaf76aSIngo Molnar static inline int test_sd_parent(struct sched_domain *sd, int flag)
105806aaf76aSIngo Molnar {
105906aaf76aSIngo Molnar 	if (sd->parent && (sd->parent->flags & flag))
106006aaf76aSIngo Molnar 		return 1;
106106aaf76aSIngo Molnar 
106206aaf76aSIngo Molnar 	return 0;
106306aaf76aSIngo Molnar }
10641da177e4SLinus Torvalds 
106547fe38fcSPeter Zijlstra unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
106647fe38fcSPeter Zijlstra unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
106747fe38fcSPeter Zijlstra 
106839be3501SPeter Zijlstra bool cpus_share_cache(int this_cpu, int that_cpu);
106939be3501SPeter Zijlstra 
10701b427c15SIngo Molnar #else /* CONFIG_SMP */
10711da177e4SLinus Torvalds 
10721b427c15SIngo Molnar struct sched_domain_attr;
10731b427c15SIngo Molnar 
10741b427c15SIngo Molnar static inline void
1075acc3f5d7SRusty Russell partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
10761b427c15SIngo Molnar 			struct sched_domain_attr *dattr_new)
1077d02c7a8cSCon Kolivas {
1078d02c7a8cSCon Kolivas }
107939be3501SPeter Zijlstra 
108039be3501SPeter Zijlstra static inline bool cpus_share_cache(int this_cpu, int that_cpu)
108139be3501SPeter Zijlstra {
108239be3501SPeter Zijlstra 	return true;
108339be3501SPeter Zijlstra }
108439be3501SPeter Zijlstra 
10851b427c15SIngo Molnar #endif	/* !CONFIG_SMP */
10861da177e4SLinus Torvalds 
108747fe38fcSPeter Zijlstra 
10881da177e4SLinus Torvalds struct io_context;			/* See blkdev.h */
10891da177e4SLinus Torvalds 
10901da177e4SLinus Torvalds 
1091383f2835SChen, Kenneth W #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
109236c8b586SIngo Molnar extern void prefetch_stack(struct task_struct *t);
1093383f2835SChen, Kenneth W #else
1094383f2835SChen, Kenneth W static inline void prefetch_stack(struct task_struct *t) { }
1095383f2835SChen, Kenneth W #endif
10961da177e4SLinus Torvalds 
10971da177e4SLinus Torvalds struct audit_context;		/* See audit.c */
10981da177e4SLinus Torvalds struct mempolicy;
1099b92ce558SJens Axboe struct pipe_inode_info;
11004865ecf1SSerge E. Hallyn struct uts_namespace;
11011da177e4SLinus Torvalds 
110220b8a59fSIngo Molnar struct rq;
110320b8a59fSIngo Molnar struct sched_domain;
110420b8a59fSIngo Molnar 
11057d478721SPeter Zijlstra /*
11067d478721SPeter Zijlstra  * wake flags
11077d478721SPeter Zijlstra  */
11087d478721SPeter Zijlstra #define WF_SYNC		0x01		/* waker goes to sleep after wakup */
1109a7558e01SPeter Zijlstra #define WF_FORK		0x02		/* child wakeup after fork */
1110f339b9dcSPeter Zijlstra #define WF_MIGRATED	0x04		/* internal use, task got migrated */
11117d478721SPeter Zijlstra 
1112371fd7e7SPeter Zijlstra #define ENQUEUE_WAKEUP		1
111374f8e4b2SPeter Zijlstra #define ENQUEUE_HEAD		2
111474f8e4b2SPeter Zijlstra #ifdef CONFIG_SMP
111574f8e4b2SPeter Zijlstra #define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
111674f8e4b2SPeter Zijlstra #else
111774f8e4b2SPeter Zijlstra #define ENQUEUE_WAKING		0
111874f8e4b2SPeter Zijlstra #endif
1119371fd7e7SPeter Zijlstra 
1120371fd7e7SPeter Zijlstra #define DEQUEUE_SLEEP		1
1121371fd7e7SPeter Zijlstra 
112220b8a59fSIngo Molnar struct sched_class {
11235522d5d5SIngo Molnar 	const struct sched_class *next;
112420b8a59fSIngo Molnar 
1125371fd7e7SPeter Zijlstra 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1126371fd7e7SPeter Zijlstra 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
11274530d7abSDmitry Adamushko 	void (*yield_task) (struct rq *rq);
1128d95f4122SMike Galbraith 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
112920b8a59fSIngo Molnar 
11307d478721SPeter Zijlstra 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
113120b8a59fSIngo Molnar 
1132fb8d4724SIngo Molnar 	struct task_struct * (*pick_next_task) (struct rq *rq);
113331ee529cSIngo Molnar 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
113420b8a59fSIngo Molnar 
1135681f3e68SPeter Williams #ifdef CONFIG_SMP
11367608dec2SPeter Zijlstra 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
11374ce72a2cSLi Zefan 
11389a897c5aSSteven Rostedt 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
11399a897c5aSSteven Rostedt 	void (*post_schedule) (struct rq *this_rq);
114074f8e4b2SPeter Zijlstra 	void (*task_waking) (struct task_struct *task);
1141efbbd05aSPeter Zijlstra 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1142e1d1484fSPeter Williams 
1143cd8ba7cdSMike Travis 	void (*set_cpus_allowed)(struct task_struct *p,
114496f874e2SRusty Russell 				 const struct cpumask *newmask);
114557d885feSGregory Haskins 
11461f11eb6aSGregory Haskins 	void (*rq_online)(struct rq *rq);
11471f11eb6aSGregory Haskins 	void (*rq_offline)(struct rq *rq);
11484ce72a2cSLi Zefan #endif
11494ce72a2cSLi Zefan 
11504ce72a2cSLi Zefan 	void (*set_curr_task) (struct rq *rq);
11514ce72a2cSLi Zefan 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1152cd29fe6fSPeter Zijlstra 	void (*task_fork) (struct task_struct *p);
1153cb469845SSteven Rostedt 
1154da7a735eSPeter Zijlstra 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1155da7a735eSPeter Zijlstra 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1156cb469845SSteven Rostedt 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1157da7a735eSPeter Zijlstra 			     int oldprio);
1158810b3817SPeter Zijlstra 
1159dba091b9SThomas Gleixner 	unsigned int (*get_rr_interval) (struct rq *rq,
1160dba091b9SThomas Gleixner 					 struct task_struct *task);
11610d721ceaSPeter Williams 
1162810b3817SPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
1163b2b5ce02SPeter Zijlstra 	void (*task_move_group) (struct task_struct *p, int on_rq);
1164810b3817SPeter Zijlstra #endif
116520b8a59fSIngo Molnar };
116620b8a59fSIngo Molnar 
116720b8a59fSIngo Molnar struct load_weight {
116820b8a59fSIngo Molnar 	unsigned long weight, inv_weight;
116920b8a59fSIngo Molnar };
117020b8a59fSIngo Molnar 
117194c18227SIngo Molnar #ifdef CONFIG_SCHEDSTATS
117241acab88SLucas De Marchi struct sched_statistics {
117394c18227SIngo Molnar 	u64			wait_start;
117494c18227SIngo Molnar 	u64			wait_max;
11756d082592SArjan van de Ven 	u64			wait_count;
11766d082592SArjan van de Ven 	u64			wait_sum;
11778f0dfc34SArjan van de Ven 	u64			iowait_count;
11788f0dfc34SArjan van de Ven 	u64			iowait_sum;
117994c18227SIngo Molnar 
118094c18227SIngo Molnar 	u64			sleep_start;
118120b8a59fSIngo Molnar 	u64			sleep_max;
118294c18227SIngo Molnar 	s64			sum_sleep_runtime;
118394c18227SIngo Molnar 
118494c18227SIngo Molnar 	u64			block_start;
118520b8a59fSIngo Molnar 	u64			block_max;
118620b8a59fSIngo Molnar 	u64			exec_max;
1187eba1ed4bSIngo Molnar 	u64			slice_max;
1188cc367732SIngo Molnar 
1189cc367732SIngo Molnar 	u64			nr_migrations_cold;
1190cc367732SIngo Molnar 	u64			nr_failed_migrations_affine;
1191cc367732SIngo Molnar 	u64			nr_failed_migrations_running;
1192cc367732SIngo Molnar 	u64			nr_failed_migrations_hot;
1193cc367732SIngo Molnar 	u64			nr_forced_migrations;
1194cc367732SIngo Molnar 
1195cc367732SIngo Molnar 	u64			nr_wakeups;
1196cc367732SIngo Molnar 	u64			nr_wakeups_sync;
1197cc367732SIngo Molnar 	u64			nr_wakeups_migrate;
1198cc367732SIngo Molnar 	u64			nr_wakeups_local;
1199cc367732SIngo Molnar 	u64			nr_wakeups_remote;
1200cc367732SIngo Molnar 	u64			nr_wakeups_affine;
1201cc367732SIngo Molnar 	u64			nr_wakeups_affine_attempts;
1202cc367732SIngo Molnar 	u64			nr_wakeups_passive;
1203cc367732SIngo Molnar 	u64			nr_wakeups_idle;
120441acab88SLucas De Marchi };
120541acab88SLucas De Marchi #endif
120641acab88SLucas De Marchi 
120741acab88SLucas De Marchi struct sched_entity {
120841acab88SLucas De Marchi 	struct load_weight	load;		/* for load-balancing */
120941acab88SLucas De Marchi 	struct rb_node		run_node;
121041acab88SLucas De Marchi 	struct list_head	group_node;
121141acab88SLucas De Marchi 	unsigned int		on_rq;
121241acab88SLucas De Marchi 
121341acab88SLucas De Marchi 	u64			exec_start;
121441acab88SLucas De Marchi 	u64			sum_exec_runtime;
121541acab88SLucas De Marchi 	u64			vruntime;
121641acab88SLucas De Marchi 	u64			prev_sum_exec_runtime;
121741acab88SLucas De Marchi 
121841acab88SLucas De Marchi 	u64			nr_migrations;
121941acab88SLucas De Marchi 
122041acab88SLucas De Marchi #ifdef CONFIG_SCHEDSTATS
122141acab88SLucas De Marchi 	struct sched_statistics statistics;
122294c18227SIngo Molnar #endif
122394c18227SIngo Molnar 
122420b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
122520b8a59fSIngo Molnar 	struct sched_entity	*parent;
122620b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
122720b8a59fSIngo Molnar 	struct cfs_rq		*cfs_rq;
122820b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
122920b8a59fSIngo Molnar 	struct cfs_rq		*my_q;
123020b8a59fSIngo Molnar #endif
123120b8a59fSIngo Molnar };
123270b97a7fSIngo Molnar 
1233fa717060SPeter Zijlstra struct sched_rt_entity {
1234fa717060SPeter Zijlstra 	struct list_head run_list;
123578f2c7dbSPeter Zijlstra 	unsigned long timeout;
1236bee367edSRichard Kennedy 	unsigned int time_slice;
12376f505b16SPeter Zijlstra 	int nr_cpus_allowed;
12386f505b16SPeter Zijlstra 
123958d6c2d7SPeter Zijlstra 	struct sched_rt_entity *back;
1240052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
12416f505b16SPeter Zijlstra 	struct sched_rt_entity	*parent;
12426f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
12436f505b16SPeter Zijlstra 	struct rt_rq		*rt_rq;
12446f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
12456f505b16SPeter Zijlstra 	struct rt_rq		*my_q;
12466f505b16SPeter Zijlstra #endif
1247fa717060SPeter Zijlstra };
1248fa717060SPeter Zijlstra 
1249de5bdff7SHiroshi Shimamoto /*
1250de5bdff7SHiroshi Shimamoto  * default timeslice is 100 msecs (used only for SCHED_RR tasks).
1251de5bdff7SHiroshi Shimamoto  * Timeslices get refilled after they expire.
1252de5bdff7SHiroshi Shimamoto  */
1253de5bdff7SHiroshi Shimamoto #define RR_TIMESLICE		(100 * HZ / 1000)
1254de5bdff7SHiroshi Shimamoto 
125586848966SPaul E. McKenney struct rcu_node;
125686848966SPaul E. McKenney 
12578dc85d54SPeter Zijlstra enum perf_event_task_context {
12588dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
12598dc85d54SPeter Zijlstra 	perf_hw_context = 0,
126089a1e187SPeter Zijlstra 	perf_sw_context,
12618dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
12628dc85d54SPeter Zijlstra };
12638dc85d54SPeter Zijlstra 
12641da177e4SLinus Torvalds struct task_struct {
12651da177e4SLinus Torvalds 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1266f7e4217bSRoman Zippel 	void *stack;
12671da177e4SLinus Torvalds 	atomic_t usage;
126897dc32cdSWilliam Cohen 	unsigned int flags;	/* per process flags, defined below */
126997dc32cdSWilliam Cohen 	unsigned int ptrace;
12701da177e4SLinus Torvalds 
12712dd73a4fSPeter Williams #ifdef CONFIG_SMP
1272fa14ff4aSPeter Zijlstra 	struct llist_node wake_entry;
12733ca7a440SPeter Zijlstra 	int on_cpu;
12744866cde0SNick Piggin #endif
1275fd2f4419SPeter Zijlstra 	int on_rq;
127650e645a8SIngo Molnar 
1277b29739f9SIngo Molnar 	int prio, static_prio, normal_prio;
1278c7aceabaSRichard Kennedy 	unsigned int rt_priority;
12795522d5d5SIngo Molnar 	const struct sched_class *sched_class;
128020b8a59fSIngo Molnar 	struct sched_entity se;
1281fa717060SPeter Zijlstra 	struct sched_rt_entity rt;
12821da177e4SLinus Torvalds 
1283e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
1284e107be36SAvi Kivity 	/* list of struct preempt_notifier: */
1285e107be36SAvi Kivity 	struct hlist_head preempt_notifiers;
1286e107be36SAvi Kivity #endif
1287e107be36SAvi Kivity 
128818796aa0SAlexey Dobriyan 	/*
128918796aa0SAlexey Dobriyan 	 * fpu_counter contains the number of consecutive context switches
129018796aa0SAlexey Dobriyan 	 * that the FPU is used. If this is over a threshold, the lazy fpu
129118796aa0SAlexey Dobriyan 	 * saving becomes unlazy to save the trap. This is an unsigned char
129218796aa0SAlexey Dobriyan 	 * so that after 256 times the counter wraps and the behavior turns
129318796aa0SAlexey Dobriyan 	 * lazy again; this to deal with bursty apps that only use FPU for
129418796aa0SAlexey Dobriyan 	 * a short time
129518796aa0SAlexey Dobriyan 	 */
129618796aa0SAlexey Dobriyan 	unsigned char fpu_counter;
12976c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
12982056a782SJens Axboe 	unsigned int btrace_seq;
12996c5c9341SAlexey Dobriyan #endif
13001da177e4SLinus Torvalds 
130197dc32cdSWilliam Cohen 	unsigned int policy;
13021da177e4SLinus Torvalds 	cpumask_t cpus_allowed;
13031da177e4SLinus Torvalds 
1304a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1305e260be67SPaul E. McKenney 	int rcu_read_lock_nesting;
1306f41d911fSPaul E. McKenney 	char rcu_read_unlock_special;
1307f41d911fSPaul E. McKenney 	struct list_head rcu_node_entry;
1308a57eb940SPaul E. McKenney #endif /* #ifdef CONFIG_PREEMPT_RCU */
1309a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1310a57eb940SPaul E. McKenney 	struct rcu_node *rcu_blocked_node;
1311f41d911fSPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
131224278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
131324278d14SPaul E. McKenney 	struct rt_mutex *rcu_boost_mutex;
131424278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1315e260be67SPaul E. McKenney 
131652f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
13171da177e4SLinus Torvalds 	struct sched_info sched_info;
13181da177e4SLinus Torvalds #endif
13191da177e4SLinus Torvalds 
13201da177e4SLinus Torvalds 	struct list_head tasks;
1321806c09a7SDario Faggioli #ifdef CONFIG_SMP
1322917b627dSGregory Haskins 	struct plist_node pushable_tasks;
1323806c09a7SDario Faggioli #endif
13241da177e4SLinus Torvalds 
13251da177e4SLinus Torvalds 	struct mm_struct *mm, *active_mm;
13264471a675SJiri Kosina #ifdef CONFIG_COMPAT_BRK
13274471a675SJiri Kosina 	unsigned brk_randomized:1;
13284471a675SJiri Kosina #endif
132934e55232SKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
133034e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat	rss_stat;
133134e55232SKAMEZAWA Hiroyuki #endif
13321da177e4SLinus Torvalds /* task state */
133397dc32cdSWilliam Cohen 	int exit_state;
13341da177e4SLinus Torvalds 	int exit_code, exit_signal;
13351da177e4SLinus Torvalds 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1336a8f072c1STejun Heo 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
13371da177e4SLinus Torvalds 	/* ??? */
133897dc32cdSWilliam Cohen 	unsigned int personality;
13391da177e4SLinus Torvalds 	unsigned did_exec:1;
1340f9ce1f1cSKentaro Takeda 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1341f9ce1f1cSKentaro Takeda 				 * execve */
13428f0dfc34SArjan van de Ven 	unsigned in_iowait:1;
13438f0dfc34SArjan van de Ven 
1344259e5e6cSAndy Lutomirski 	/* task may not gain privileges */
1345259e5e6cSAndy Lutomirski 	unsigned no_new_privs:1;
1346ca94c442SLennart Poettering 
1347ca94c442SLennart Poettering 	/* Revert to default priority/policy when forking */
1348ca94c442SLennart Poettering 	unsigned sched_reset_on_fork:1;
1349a8e4f2eaSPeter Zijlstra 	unsigned sched_contributes_to_load:1;
1350ca94c442SLennart Poettering 
13514bcdf1d0SAlexander Gordeev #ifdef CONFIG_GENERIC_HARDIRQS
13524bcdf1d0SAlexander Gordeev 	/* IRQ handler threads */
13534bcdf1d0SAlexander Gordeev 	unsigned irq_thread:1;
13544bcdf1d0SAlexander Gordeev #endif
13554bcdf1d0SAlexander Gordeev 
13561da177e4SLinus Torvalds 	pid_t pid;
13571da177e4SLinus Torvalds 	pid_t tgid;
13580a425405SArjan van de Ven 
13591314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
13600a425405SArjan van de Ven 	/* Canary value for the -fstack-protector gcc feature */
13610a425405SArjan van de Ven 	unsigned long stack_canary;
13621314562aSHiroshi Shimamoto #endif
1363e0032087SIngo Molnar 
13641da177e4SLinus Torvalds 	/*
13651da177e4SLinus Torvalds 	 * pointers to (original) parent process, youngest child, younger sibling,
13661da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
1367f470021aSRoland McGrath 	 * p->real_parent->pid)
13681da177e4SLinus Torvalds 	 */
1369abd63bc3SKees Cook 	struct task_struct __rcu *real_parent; /* real parent process */
1370abd63bc3SKees Cook 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
13711da177e4SLinus Torvalds 	/*
1372f470021aSRoland McGrath 	 * children/sibling forms the list of my natural children
13731da177e4SLinus Torvalds 	 */
13741da177e4SLinus Torvalds 	struct list_head children;	/* list of my children */
13751da177e4SLinus Torvalds 	struct list_head sibling;	/* linkage in my parent's children list */
13761da177e4SLinus Torvalds 	struct task_struct *group_leader;	/* threadgroup leader */
13771da177e4SLinus Torvalds 
1378f470021aSRoland McGrath 	/*
1379f470021aSRoland McGrath 	 * ptraced is the list of tasks this task is using ptrace on.
1380f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
1381f470021aSRoland McGrath 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1382f470021aSRoland McGrath 	 */
1383f470021aSRoland McGrath 	struct list_head ptraced;
1384f470021aSRoland McGrath 	struct list_head ptrace_entry;
1385f470021aSRoland McGrath 
13861da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
138792476d7fSEric W. Biederman 	struct pid_link pids[PIDTYPE_MAX];
138847e65328SOleg Nesterov 	struct list_head thread_group;
13891da177e4SLinus Torvalds 
13901da177e4SLinus Torvalds 	struct completion *vfork_done;		/* for vfork() */
13911da177e4SLinus Torvalds 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
13921da177e4SLinus Torvalds 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
13931da177e4SLinus Torvalds 
1394c66f08beSMichael Neuling 	cputime_t utime, stime, utimescaled, stimescaled;
13959ac52315SLaurent Vivier 	cputime_t gtime;
1396d99ca3b9SHidetoshi Seto #ifndef CONFIG_VIRT_CPU_ACCOUNTING
13979301899bSBalbir Singh 	cputime_t prev_utime, prev_stime;
1398d99ca3b9SHidetoshi Seto #endif
13991da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw; /* context switch counts */
1400924b42d5STomas Janousek 	struct timespec start_time; 		/* monotonic time */
1401924b42d5STomas Janousek 	struct timespec real_start_time;	/* boot based time */
14021da177e4SLinus Torvalds /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
14031da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt;
14041da177e4SLinus Torvalds 
1405f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
14061da177e4SLinus Torvalds 	struct list_head cpu_timers[3];
14071da177e4SLinus Torvalds 
14081da177e4SLinus Torvalds /* process credentials */
14091b0ba1c9SArnd Bergmann 	const struct cred __rcu *real_cred; /* objective and real subjective task
14103b11a1deSDavid Howells 					 * credentials (COW) */
14111b0ba1c9SArnd Bergmann 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
14123b11a1deSDavid Howells 					 * credentials (COW) */
1413ee18d64cSDavid Howells 	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1414b6dff3ecSDavid Howells 
141536772092SPaolo 'Blaisorblade' Giarrusso 	char comm[TASK_COMM_LEN]; /* executable name excluding path
141636772092SPaolo 'Blaisorblade' Giarrusso 				     - access with [gs]et_task_comm (which lock
141736772092SPaolo 'Blaisorblade' Giarrusso 				       it with task_lock())
1418221af7f8SLinus Torvalds 				     - initialized normally by setup_new_exec */
14191da177e4SLinus Torvalds /* file system info */
14201da177e4SLinus Torvalds 	int link_count, total_link_count;
14213d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
14221da177e4SLinus Torvalds /* ipc stuff */
14231da177e4SLinus Torvalds 	struct sysv_sem sysvsem;
14243d5b6fccSAlexey Dobriyan #endif
1425e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
142682a1fcb9SIngo Molnar /* hung task detection */
142782a1fcb9SIngo Molnar 	unsigned long last_switch_count;
142882a1fcb9SIngo Molnar #endif
14291da177e4SLinus Torvalds /* CPU-specific state of this task */
14301da177e4SLinus Torvalds 	struct thread_struct thread;
14311da177e4SLinus Torvalds /* filesystem information */
14321da177e4SLinus Torvalds 	struct fs_struct *fs;
14331da177e4SLinus Torvalds /* open file information */
14341da177e4SLinus Torvalds 	struct files_struct *files;
14351651e14eSSerge E. Hallyn /* namespaces */
1436ab516013SSerge E. Hallyn 	struct nsproxy *nsproxy;
14371da177e4SLinus Torvalds /* signal handlers */
14381da177e4SLinus Torvalds 	struct signal_struct *signal;
14391da177e4SLinus Torvalds 	struct sighand_struct *sighand;
14401da177e4SLinus Torvalds 
14411da177e4SLinus Torvalds 	sigset_t blocked, real_blocked;
1442f3de272bSRoland McGrath 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
14431da177e4SLinus Torvalds 	struct sigpending pending;
14441da177e4SLinus Torvalds 
14451da177e4SLinus Torvalds 	unsigned long sas_ss_sp;
14461da177e4SLinus Torvalds 	size_t sas_ss_size;
14471da177e4SLinus Torvalds 	int (*notifier)(void *priv);
14481da177e4SLinus Torvalds 	void *notifier_data;
14491da177e4SLinus Torvalds 	sigset_t *notifier_mask;
14501da177e4SLinus Torvalds 	struct audit_context *audit_context;
1451bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
1452bfef93a5SAl Viro 	uid_t loginuid;
14534746ec5bSEric Paris 	unsigned int sessionid;
1454bfef93a5SAl Viro #endif
1455*932ecebbSWill Drewry 	struct seccomp seccomp;
14561da177e4SLinus Torvalds 
14571da177e4SLinus Torvalds /* Thread group tracking */
14581da177e4SLinus Torvalds    	u32 parent_exec_id;
14591da177e4SLinus Torvalds    	u32 self_exec_id;
146058568d2aSMiao Xie /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
146158568d2aSMiao Xie  * mempolicy */
14621da177e4SLinus Torvalds 	spinlock_t alloc_lock;
14631da177e4SLinus Torvalds 
1464b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
14651d615482SThomas Gleixner 	raw_spinlock_t pi_lock;
1466b29739f9SIngo Molnar 
146723f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
146823f78d4aSIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task */
146923f78d4aSIngo Molnar 	struct plist_head pi_waiters;
147023f78d4aSIngo Molnar 	/* Deadlock detection and priority inheritance handling */
147123f78d4aSIngo Molnar 	struct rt_mutex_waiter *pi_blocked_on;
147223f78d4aSIngo Molnar #endif
147323f78d4aSIngo Molnar 
1474408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
1475408894eeSIngo Molnar 	/* mutex deadlock detection */
1476408894eeSIngo Molnar 	struct mutex_waiter *blocked_on;
1477408894eeSIngo Molnar #endif
1478de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
1479de30a2b3SIngo Molnar 	unsigned int irq_events;
1480de30a2b3SIngo Molnar 	unsigned long hardirq_enable_ip;
1481de30a2b3SIngo Molnar 	unsigned long hardirq_disable_ip;
1482fa1452e8SHiroshi Shimamoto 	unsigned int hardirq_enable_event;
1483de30a2b3SIngo Molnar 	unsigned int hardirq_disable_event;
1484fa1452e8SHiroshi Shimamoto 	int hardirqs_enabled;
1485de30a2b3SIngo Molnar 	int hardirq_context;
1486fa1452e8SHiroshi Shimamoto 	unsigned long softirq_disable_ip;
1487fa1452e8SHiroshi Shimamoto 	unsigned long softirq_enable_ip;
1488fa1452e8SHiroshi Shimamoto 	unsigned int softirq_disable_event;
1489fa1452e8SHiroshi Shimamoto 	unsigned int softirq_enable_event;
1490fa1452e8SHiroshi Shimamoto 	int softirqs_enabled;
1491de30a2b3SIngo Molnar 	int softirq_context;
1492de30a2b3SIngo Molnar #endif
1493fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
1494bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
1495fbb9ce95SIngo Molnar 	u64 curr_chain_key;
1496fbb9ce95SIngo Molnar 	int lockdep_depth;
1497fbb9ce95SIngo Molnar 	unsigned int lockdep_recursion;
1498c7aceabaSRichard Kennedy 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1499cf40bd16SNick Piggin 	gfp_t lockdep_reclaim_gfp;
1500fbb9ce95SIngo Molnar #endif
1501408894eeSIngo Molnar 
15021da177e4SLinus Torvalds /* journalling filesystem info */
15031da177e4SLinus Torvalds 	void *journal_info;
15041da177e4SLinus Torvalds 
1505d89d8796SNeil Brown /* stacked block device info */
1506bddd87c7SAkinobu Mita 	struct bio_list *bio_list;
1507d89d8796SNeil Brown 
150873c10101SJens Axboe #ifdef CONFIG_BLOCK
150973c10101SJens Axboe /* stack plugging */
151073c10101SJens Axboe 	struct blk_plug *plug;
151173c10101SJens Axboe #endif
151273c10101SJens Axboe 
15131da177e4SLinus Torvalds /* VM state */
15141da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state;
15151da177e4SLinus Torvalds 
15161da177e4SLinus Torvalds 	struct backing_dev_info *backing_dev_info;
15171da177e4SLinus Torvalds 
15181da177e4SLinus Torvalds 	struct io_context *io_context;
15191da177e4SLinus Torvalds 
15201da177e4SLinus Torvalds 	unsigned long ptrace_message;
15211da177e4SLinus Torvalds 	siginfo_t *last_siginfo; /* For ptrace use.  */
15227c3ab738SAndrew Morton 	struct task_io_accounting ioac;
15238f0ab514SJay Lan #if defined(CONFIG_TASK_XACCT)
15241da177e4SLinus Torvalds 	u64 acct_rss_mem1;	/* accumulated rss usage */
15251da177e4SLinus Torvalds 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
152649b5cf34SJonathan Lim 	cputime_t acct_timexpd;	/* stime + utime since last update */
15271da177e4SLinus Torvalds #endif
15281da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
152958568d2aSMiao Xie 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1530cc9a6c87SMel Gorman 	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
1531825a46afSPaul Jackson 	int cpuset_mem_spread_rotor;
15326adef3ebSJack Steiner 	int cpuset_slab_spread_rotor;
15331da177e4SLinus Torvalds #endif
1534ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
1535817929ecSPaul Menage 	/* Control Group info protected by css_set_lock */
15362c392b8cSArnd Bergmann 	struct css_set __rcu *cgroups;
1537817929ecSPaul Menage 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1538817929ecSPaul Menage 	struct list_head cg_list;
1539ddbcc7e8SPaul Menage #endif
154042b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
15410771dfefSIngo Molnar 	struct robust_list_head __user *robust_list;
154234f192c6SIngo Molnar #ifdef CONFIG_COMPAT
154334f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
154434f192c6SIngo Molnar #endif
1545c87e2837SIngo Molnar 	struct list_head pi_state_list;
1546c87e2837SIngo Molnar 	struct futex_pi_state *pi_state_cache;
154742b2dd0aSAlexey Dobriyan #endif
1548cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
15498dc85d54SPeter Zijlstra 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1550cdd6c482SIngo Molnar 	struct mutex perf_event_mutex;
1551cdd6c482SIngo Molnar 	struct list_head perf_event_list;
1552a63eaf34SPaul Mackerras #endif
1553c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
155458568d2aSMiao Xie 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1555c7aceabaSRichard Kennedy 	short il_next;
1556207205a2SEric Dumazet 	short pref_node_fork;
1557c7aceabaSRichard Kennedy #endif
1558e56d0903SIngo Molnar 	struct rcu_head rcu;
1559b92ce558SJens Axboe 
1560b92ce558SJens Axboe 	/*
1561b92ce558SJens Axboe 	 * cache last used pipe for splice
1562b92ce558SJens Axboe 	 */
1563b92ce558SJens Axboe 	struct pipe_inode_info *splice_pipe;
1564ca74e92bSShailabh Nagar #ifdef	CONFIG_TASK_DELAY_ACCT
1565ca74e92bSShailabh Nagar 	struct task_delay_info *delays;
1566ca74e92bSShailabh Nagar #endif
1567f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1568f4f154fdSAkinobu Mita 	int make_it_fail;
1569f4f154fdSAkinobu Mita #endif
15709d823e8fSWu Fengguang 	/*
15719d823e8fSWu Fengguang 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
15729d823e8fSWu Fengguang 	 * balance_dirty_pages() for some dirty throttling pause
15739d823e8fSWu Fengguang 	 */
15749d823e8fSWu Fengguang 	int nr_dirtied;
15759d823e8fSWu Fengguang 	int nr_dirtied_pause;
157683712358SWu Fengguang 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
15779d823e8fSWu Fengguang 
15789745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
15799745512cSArjan van de Ven 	int latency_record_count;
15809745512cSArjan van de Ven 	struct latency_record latency_record[LT_SAVECOUNT];
15819745512cSArjan van de Ven #endif
15826976675dSArjan van de Ven 	/*
15836976675dSArjan van de Ven 	 * time slack values; these are used to round up poll() and
15846976675dSArjan van de Ven 	 * select() etc timeout values. These are in nanoseconds.
15856976675dSArjan van de Ven 	 */
15866976675dSArjan van de Ven 	unsigned long timer_slack_ns;
15876976675dSArjan van de Ven 	unsigned long default_timer_slack_ns;
1588f8d570a4SDavid Miller 
1589f8d570a4SDavid Miller 	struct list_head	*scm_work_list;
1590fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
15913ad2f3fbSDaniel Mack 	/* Index of current stored address in ret_stack */
1592f201ae23SFrederic Weisbecker 	int curr_ret_stack;
1593f201ae23SFrederic Weisbecker 	/* Stack of return addresses for return function tracing */
1594f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack	*ret_stack;
15958aef2d28SSteven Rostedt 	/* time stamp for last schedule */
15968aef2d28SSteven Rostedt 	unsigned long long ftrace_timestamp;
1597f201ae23SFrederic Weisbecker 	/*
1598f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
1599f201ae23SFrederic Weisbecker 	 * because of depth overrun.
1600f201ae23SFrederic Weisbecker 	 */
1601f201ae23SFrederic Weisbecker 	atomic_t trace_overrun;
1602380c4b14SFrederic Weisbecker 	/* Pause for the tracing */
1603380c4b14SFrederic Weisbecker 	atomic_t tracing_graph_pause;
1604f201ae23SFrederic Weisbecker #endif
1605ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1606ea4e2bc4SSteven Rostedt 	/* state flags for use by tracers */
1607ea4e2bc4SSteven Rostedt 	unsigned long trace;
1608b1cff0adSSteven Rostedt 	/* bitmask and counter of trace recursion */
1609261842b7SSteven Rostedt 	unsigned long trace_recursion;
1610261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
1611569b846dSKAMEZAWA Hiroyuki #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1612569b846dSKAMEZAWA Hiroyuki 	struct memcg_batch_info {
1613569b846dSKAMEZAWA Hiroyuki 		int do_batch;	/* incremented when batch uncharge started */
1614569b846dSKAMEZAWA Hiroyuki 		struct mem_cgroup *memcg; /* target memcg of uncharge */
16157ffd4ca7SJohannes Weiner 		unsigned long nr_pages;	/* uncharged usage */
16167ffd4ca7SJohannes Weiner 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1617569b846dSKAMEZAWA Hiroyuki 	} memcg_batch;
1618569b846dSKAMEZAWA Hiroyuki #endif
1619bf26c018SFrederic Weisbecker #ifdef CONFIG_HAVE_HW_BREAKPOINT
1620bf26c018SFrederic Weisbecker 	atomic_t ptrace_bp_refcnt;
1621bf26c018SFrederic Weisbecker #endif
16221da177e4SLinus Torvalds };
16231da177e4SLinus Torvalds 
162476e6eee0SRusty Russell /* Future-safe accessor for struct task_struct's cpus_allowed. */
1625a4636818SRusty Russell #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
162676e6eee0SRusty Russell 
1627e05606d3SIngo Molnar /*
1628e05606d3SIngo Molnar  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1629e05606d3SIngo Molnar  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1630e05606d3SIngo Molnar  * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1631e05606d3SIngo Molnar  * values are inverted: lower p->prio value means higher priority.
1632e05606d3SIngo Molnar  *
1633e05606d3SIngo Molnar  * The MAX_USER_RT_PRIO value allows the actual maximum
1634e05606d3SIngo Molnar  * RT priority to be separate from the value exported to
1635e05606d3SIngo Molnar  * user-space.  This allows kernel threads to set their
1636e05606d3SIngo Molnar  * priority to a value higher than any user task. Note:
1637e05606d3SIngo Molnar  * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1638e05606d3SIngo Molnar  */
1639e05606d3SIngo Molnar 
1640e05606d3SIngo Molnar #define MAX_USER_RT_PRIO	100
1641e05606d3SIngo Molnar #define MAX_RT_PRIO		MAX_USER_RT_PRIO
1642e05606d3SIngo Molnar 
1643e05606d3SIngo Molnar #define MAX_PRIO		(MAX_RT_PRIO + 40)
1644e05606d3SIngo Molnar #define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
1645e05606d3SIngo Molnar 
1646e05606d3SIngo Molnar static inline int rt_prio(int prio)
1647e05606d3SIngo Molnar {
1648e05606d3SIngo Molnar 	if (unlikely(prio < MAX_RT_PRIO))
1649e05606d3SIngo Molnar 		return 1;
1650e05606d3SIngo Molnar 	return 0;
1651e05606d3SIngo Molnar }
1652e05606d3SIngo Molnar 
1653e868171aSAlexey Dobriyan static inline int rt_task(struct task_struct *p)
1654e05606d3SIngo Molnar {
1655e05606d3SIngo Molnar 	return rt_prio(p->prio);
1656e05606d3SIngo Molnar }
1657e05606d3SIngo Molnar 
1658e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
165922c935f4SEric W. Biederman {
166022c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
166122c935f4SEric W. Biederman }
166222c935f4SEric W. Biederman 
1663e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
166422c935f4SEric W. Biederman {
166522c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
166622c935f4SEric W. Biederman }
166722c935f4SEric W. Biederman 
16686dda81f4SOleg Nesterov /*
16696dda81f4SOleg Nesterov  * Without tasklist or rcu lock it is not safe to dereference
16706dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current,
16716dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
16726dda81f4SOleg Nesterov  */
1673e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
167422c935f4SEric W. Biederman {
167522c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
167622c935f4SEric W. Biederman }
167722c935f4SEric W. Biederman 
1678e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
167922c935f4SEric W. Biederman {
168022c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
168122c935f4SEric W. Biederman }
168222c935f4SEric W. Biederman 
16837af57294SPavel Emelyanov struct pid_namespace;
16847af57294SPavel Emelyanov 
16857af57294SPavel Emelyanov /*
16867af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
16877af57294SPavel Emelyanov  * from various namespaces
16887af57294SPavel Emelyanov  *
16897af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
169044c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
169144c4e1b2SEric W. Biederman  *                     current.
16927af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
16937af57294SPavel Emelyanov  *
16947af57294SPavel Emelyanov  * set_task_vxid()   : assigns a virtual id to a task;
16957af57294SPavel Emelyanov  *
16967af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
16977af57294SPavel Emelyanov  */
169852ee2dfdSOleg Nesterov pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
169952ee2dfdSOleg Nesterov 			struct pid_namespace *ns);
17007af57294SPavel Emelyanov 
1701e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
17027af57294SPavel Emelyanov {
17037af57294SPavel Emelyanov 	return tsk->pid;
17047af57294SPavel Emelyanov }
17057af57294SPavel Emelyanov 
170652ee2dfdSOleg Nesterov static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
170752ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
170852ee2dfdSOleg Nesterov {
170952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
171052ee2dfdSOleg Nesterov }
17117af57294SPavel Emelyanov 
17127af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
17137af57294SPavel Emelyanov {
171452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
17157af57294SPavel Emelyanov }
17167af57294SPavel Emelyanov 
17177af57294SPavel Emelyanov 
1718e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
17197af57294SPavel Emelyanov {
17207af57294SPavel Emelyanov 	return tsk->tgid;
17217af57294SPavel Emelyanov }
17227af57294SPavel Emelyanov 
17232f2a3a46SPavel Emelyanov pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
17247af57294SPavel Emelyanov 
17257af57294SPavel Emelyanov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
17267af57294SPavel Emelyanov {
17277af57294SPavel Emelyanov 	return pid_vnr(task_tgid(tsk));
17287af57294SPavel Emelyanov }
17297af57294SPavel Emelyanov 
17307af57294SPavel Emelyanov 
173152ee2dfdSOleg Nesterov static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
173252ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
17337af57294SPavel Emelyanov {
173452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
17357af57294SPavel Emelyanov }
17367af57294SPavel Emelyanov 
17377af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
17387af57294SPavel Emelyanov {
173952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
17407af57294SPavel Emelyanov }
17417af57294SPavel Emelyanov 
17427af57294SPavel Emelyanov 
174352ee2dfdSOleg Nesterov static inline pid_t task_session_nr_ns(struct task_struct *tsk,
174452ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
17457af57294SPavel Emelyanov {
174652ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
17477af57294SPavel Emelyanov }
17487af57294SPavel Emelyanov 
17497af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
17507af57294SPavel Emelyanov {
175152ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
17527af57294SPavel Emelyanov }
17537af57294SPavel Emelyanov 
17541b0f7ffdSOleg Nesterov /* obsolete, do not use */
17551b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
17561b0f7ffdSOleg Nesterov {
17571b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
17581b0f7ffdSOleg Nesterov }
17597af57294SPavel Emelyanov 
17601da177e4SLinus Torvalds /**
17611da177e4SLinus Torvalds  * pid_alive - check that a task structure is not stale
17621da177e4SLinus Torvalds  * @p: Task structure to be checked.
17631da177e4SLinus Torvalds  *
17641da177e4SLinus Torvalds  * Test if a process is not yet dead (at most zombie state)
17651da177e4SLinus Torvalds  * If pid_alive fails, then pointers within the task structure
17661da177e4SLinus Torvalds  * can be stale and must not be dereferenced.
17671da177e4SLinus Torvalds  */
1768e868171aSAlexey Dobriyan static inline int pid_alive(struct task_struct *p)
17691da177e4SLinus Torvalds {
177092476d7fSEric W. Biederman 	return p->pids[PIDTYPE_PID].pid != NULL;
17711da177e4SLinus Torvalds }
17721da177e4SLinus Torvalds 
1773f400e198SSukadev Bhattiprolu /**
1774b460cbc5SSerge E. Hallyn  * is_global_init - check if a task structure is init
17753260259fSHenne  * @tsk: Task structure to be checked.
17763260259fSHenne  *
17773260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1778f400e198SSukadev Bhattiprolu  */
1779e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1780b461cc03SPavel Emelyanov {
1781b461cc03SPavel Emelyanov 	return tsk->pid == 1;
1782b461cc03SPavel Emelyanov }
1783b460cbc5SSerge E. Hallyn 
1784b460cbc5SSerge E. Hallyn /*
1785b460cbc5SSerge E. Hallyn  * is_container_init:
1786b460cbc5SSerge E. Hallyn  * check whether in the task is init in its own pid namespace.
1787b460cbc5SSerge E. Hallyn  */
1788b461cc03SPavel Emelyanov extern int is_container_init(struct task_struct *tsk);
1789f400e198SSukadev Bhattiprolu 
17909ec52099SCedric Le Goater extern struct pid *cad_pid;
17919ec52099SCedric Le Goater 
17921da177e4SLinus Torvalds extern void free_task(struct task_struct *tsk);
17931da177e4SLinus Torvalds #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1794e56d0903SIngo Molnar 
1795158d9ebdSAndrew Morton extern void __put_task_struct(struct task_struct *t);
1796e56d0903SIngo Molnar 
1797e56d0903SIngo Molnar static inline void put_task_struct(struct task_struct *t)
1798e56d0903SIngo Molnar {
1799e56d0903SIngo Molnar 	if (atomic_dec_and_test(&t->usage))
18008c7904a0SEric W. Biederman 		__put_task_struct(t);
1801e56d0903SIngo Molnar }
18021da177e4SLinus Torvalds 
1803d180c5bcSHidetoshi Seto extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
18040cf55e1eSHidetoshi Seto extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
180549048622SBalbir Singh 
18061da177e4SLinus Torvalds /*
18071da177e4SLinus Torvalds  * Per process flags
18081da177e4SLinus Torvalds  */
18091da177e4SLinus Torvalds #define PF_EXITING	0x00000004	/* getting shut down */
1810778e9a9cSAlexey Kuznetsov #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
181194886b84SLaurent Vivier #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
181221aa9af0STejun Heo #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
18131da177e4SLinus Torvalds #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
18144db96cf0SAndi Kleen #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
18151da177e4SLinus Torvalds #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
18161da177e4SLinus Torvalds #define PF_DUMPCORE	0x00000200	/* dumped core */
18171da177e4SLinus Torvalds #define PF_SIGNALED	0x00000400	/* killed by a signal */
18181da177e4SLinus Torvalds #define PF_MEMALLOC	0x00000800	/* Allocating memory */
181972fa5997SVasiliy Kulikov #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
18201da177e4SLinus Torvalds #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
18211da177e4SLinus Torvalds #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
18221da177e4SLinus Torvalds #define PF_FROZEN	0x00010000	/* frozen for system suspend */
18231da177e4SLinus Torvalds #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
18241da177e4SLinus Torvalds #define PF_KSWAPD	0x00040000	/* I am kswapd */
18251da177e4SLinus Torvalds #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1826246bb0b1SOleg Nesterov #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1827b31dc66aSJens Axboe #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1828b31dc66aSJens Axboe #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1829b31dc66aSJens Axboe #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1830b31dc66aSJens Axboe #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
18319985b0baSDavid Rientjes #define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
18324db96cf0SAndi Kleen #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1833c61afb18SPaul Jackson #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
183461a87122SThomas Gleixner #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
183558a69cb4STejun Heo #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
18361da177e4SLinus Torvalds 
18371da177e4SLinus Torvalds /*
18381da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
18391da177e4SLinus Torvalds  * tasks can access tsk->flags in readonly mode for example
18401da177e4SLinus Torvalds  * with tsk_used_math (like during threaded core dumping).
18411da177e4SLinus Torvalds  * There is however an exception to this rule during ptrace
18421da177e4SLinus Torvalds  * or during fork: the ptracer task is allowed to write to the
18431da177e4SLinus Torvalds  * child->flags of its traced child (same goes for fork, the parent
18441da177e4SLinus Torvalds  * can write to the child->flags), because we're guaranteed the
18451da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
18461da177e4SLinus Torvalds  * at the same time the parent does it.
18471da177e4SLinus Torvalds  */
18481da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
18491da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
18501da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
18511da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
18521da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
18531da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
18541da177e4SLinus Torvalds #define conditional_used_math(condition) \
18551da177e4SLinus Torvalds 	conditional_stopped_child_used_math(condition, current)
18561da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
18571da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
18581da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
18591da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
18601da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
18611da177e4SLinus Torvalds 
1862e5c1902eSTejun Heo /*
1863a8f072c1STejun Heo  * task->jobctl flags
1864e5c1902eSTejun Heo  */
1865a8f072c1STejun Heo #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1866e5c1902eSTejun Heo 
1867a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1868a8f072c1STejun Heo #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1869a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
187073ddff2bSTejun Heo #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1871fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1872a8f072c1STejun Heo #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1873544b2c91STejun Heo #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1874a8f072c1STejun Heo 
1875a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1876a8f072c1STejun Heo #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1877a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
187873ddff2bSTejun Heo #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1879fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1880a8f072c1STejun Heo #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1881544b2c91STejun Heo #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1882a8f072c1STejun Heo 
1883fb1d910cSTejun Heo #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
188473ddff2bSTejun Heo #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
18853759a0d9STejun Heo 
18867dd3db54STejun Heo extern bool task_set_jobctl_pending(struct task_struct *task,
18877dd3db54STejun Heo 				    unsigned int mask);
188873ddff2bSTejun Heo extern void task_clear_jobctl_trapping(struct task_struct *task);
18893759a0d9STejun Heo extern void task_clear_jobctl_pending(struct task_struct *task,
18903759a0d9STejun Heo 				      unsigned int mask);
189139efa3efSTejun Heo 
1892a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1893f41d911fSPaul E. McKenney 
1894f41d911fSPaul E. McKenney #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
18951aa03f11SPaul E. McKenney #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1896f41d911fSPaul E. McKenney 
1897f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1898f41d911fSPaul E. McKenney {
1899f41d911fSPaul E. McKenney 	p->rcu_read_lock_nesting = 0;
1900f41d911fSPaul E. McKenney 	p->rcu_read_unlock_special = 0;
1901a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1902dd5d19baSPaul E. McKenney 	p->rcu_blocked_node = NULL;
190324278d14SPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
190424278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
190524278d14SPaul E. McKenney 	p->rcu_boost_mutex = NULL;
190624278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1907f41d911fSPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_node_entry);
1908f41d911fSPaul E. McKenney }
1909f41d911fSPaul E. McKenney 
1910f41d911fSPaul E. McKenney #else
1911f41d911fSPaul E. McKenney 
1912f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1913f41d911fSPaul E. McKenney {
1914f41d911fSPaul E. McKenney }
1915f41d911fSPaul E. McKenney 
1916f41d911fSPaul E. McKenney #endif
1917f41d911fSPaul E. McKenney 
19181da177e4SLinus Torvalds #ifdef CONFIG_SMP
19191e1b6c51SKOSAKI Motohiro extern void do_set_cpus_allowed(struct task_struct *p,
19201e1b6c51SKOSAKI Motohiro 			       const struct cpumask *new_mask);
19211e1b6c51SKOSAKI Motohiro 
1922cd8ba7cdSMike Travis extern int set_cpus_allowed_ptr(struct task_struct *p,
192396f874e2SRusty Russell 				const struct cpumask *new_mask);
19241da177e4SLinus Torvalds #else
19251e1b6c51SKOSAKI Motohiro static inline void do_set_cpus_allowed(struct task_struct *p,
19261e1b6c51SKOSAKI Motohiro 				      const struct cpumask *new_mask)
19271e1b6c51SKOSAKI Motohiro {
19281e1b6c51SKOSAKI Motohiro }
1929cd8ba7cdSMike Travis static inline int set_cpus_allowed_ptr(struct task_struct *p,
193096f874e2SRusty Russell 				       const struct cpumask *new_mask)
19311da177e4SLinus Torvalds {
193296f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
19331da177e4SLinus Torvalds 		return -EINVAL;
19341da177e4SLinus Torvalds 	return 0;
19351da177e4SLinus Torvalds }
19361da177e4SLinus Torvalds #endif
1937e0ad9556SRusty Russell 
1938e0ad9556SRusty Russell #ifndef CONFIG_CPUMASK_OFFSTACK
1939cd8ba7cdSMike Travis static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1940cd8ba7cdSMike Travis {
1941cd8ba7cdSMike Travis 	return set_cpus_allowed_ptr(p, &new_mask);
1942cd8ba7cdSMike Travis }
1943e0ad9556SRusty Russell #endif
19441da177e4SLinus Torvalds 
1945b342501cSIngo Molnar /*
1946c676329aSPeter Zijlstra  * Do not use outside of architecture code which knows its limitations.
1947c676329aSPeter Zijlstra  *
1948c676329aSPeter Zijlstra  * sched_clock() has no promise of monotonicity or bounded drift between
1949c676329aSPeter Zijlstra  * CPUs, use (which you should not) requires disabling IRQs.
1950c676329aSPeter Zijlstra  *
1951c676329aSPeter Zijlstra  * Please use one of the three interfaces below.
1952b342501cSIngo Molnar  */
19531bbfa6f2SMike Frysinger extern unsigned long long notrace sched_clock(void);
1954c676329aSPeter Zijlstra /*
1955c676329aSPeter Zijlstra  * See the comment in kernel/sched_clock.c
1956c676329aSPeter Zijlstra  */
1957c676329aSPeter Zijlstra extern u64 cpu_clock(int cpu);
1958c676329aSPeter Zijlstra extern u64 local_clock(void);
1959c676329aSPeter Zijlstra extern u64 sched_clock_cpu(int cpu);
1960c676329aSPeter Zijlstra 
1961e436d800SIngo Molnar 
1962c1955a3dSPeter Zijlstra extern void sched_clock_init(void);
1963c1955a3dSPeter Zijlstra 
19643e51f33fSPeter Zijlstra #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
19653e51f33fSPeter Zijlstra static inline void sched_clock_tick(void)
19663e51f33fSPeter Zijlstra {
19673e51f33fSPeter Zijlstra }
19683e51f33fSPeter Zijlstra 
19693e51f33fSPeter Zijlstra static inline void sched_clock_idle_sleep_event(void)
19703e51f33fSPeter Zijlstra {
19713e51f33fSPeter Zijlstra }
19723e51f33fSPeter Zijlstra 
19733e51f33fSPeter Zijlstra static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
19743e51f33fSPeter Zijlstra {
19753e51f33fSPeter Zijlstra }
19763e51f33fSPeter Zijlstra #else
1977c676329aSPeter Zijlstra /*
1978c676329aSPeter Zijlstra  * Architectures can set this to 1 if they have specified
1979c676329aSPeter Zijlstra  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1980c676329aSPeter Zijlstra  * but then during bootup it turns out that sched_clock()
1981c676329aSPeter Zijlstra  * is reliable after all:
1982c676329aSPeter Zijlstra  */
1983c676329aSPeter Zijlstra extern int sched_clock_stable;
1984c676329aSPeter Zijlstra 
19853e51f33fSPeter Zijlstra extern void sched_clock_tick(void);
19863e51f33fSPeter Zijlstra extern void sched_clock_idle_sleep_event(void);
19873e51f33fSPeter Zijlstra extern void sched_clock_idle_wakeup_event(u64 delta_ns);
19883e51f33fSPeter Zijlstra #endif
19893e51f33fSPeter Zijlstra 
1990b52bfee4SVenkatesh Pallipadi #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1991b52bfee4SVenkatesh Pallipadi /*
1992b52bfee4SVenkatesh Pallipadi  * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
1993b52bfee4SVenkatesh Pallipadi  * The reason for this explicit opt-in is not to have perf penalty with
1994b52bfee4SVenkatesh Pallipadi  * slow sched_clocks.
1995b52bfee4SVenkatesh Pallipadi  */
1996b52bfee4SVenkatesh Pallipadi extern void enable_sched_clock_irqtime(void);
1997b52bfee4SVenkatesh Pallipadi extern void disable_sched_clock_irqtime(void);
1998b52bfee4SVenkatesh Pallipadi #else
1999b52bfee4SVenkatesh Pallipadi static inline void enable_sched_clock_irqtime(void) {}
2000b52bfee4SVenkatesh Pallipadi static inline void disable_sched_clock_irqtime(void) {}
2001b52bfee4SVenkatesh Pallipadi #endif
2002b52bfee4SVenkatesh Pallipadi 
200336c8b586SIngo Molnar extern unsigned long long
200441b86e9cSIngo Molnar task_sched_runtime(struct task_struct *task);
20051da177e4SLinus Torvalds 
20061da177e4SLinus Torvalds /* sched_exec is called by processes performing an exec */
20071da177e4SLinus Torvalds #ifdef CONFIG_SMP
20081da177e4SLinus Torvalds extern void sched_exec(void);
20091da177e4SLinus Torvalds #else
20101da177e4SLinus Torvalds #define sched_exec()   {}
20111da177e4SLinus Torvalds #endif
20121da177e4SLinus Torvalds 
20132aa44d05SIngo Molnar extern void sched_clock_idle_sleep_event(void);
20142aa44d05SIngo Molnar extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2015bb29ab26SIngo Molnar 
20161da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
20171da177e4SLinus Torvalds extern void idle_task_exit(void);
20181da177e4SLinus Torvalds #else
20191da177e4SLinus Torvalds static inline void idle_task_exit(void) {}
20201da177e4SLinus Torvalds #endif
20211da177e4SLinus Torvalds 
202206d8308cSThomas Gleixner #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
202306d8308cSThomas Gleixner extern void wake_up_idle_cpu(int cpu);
202406d8308cSThomas Gleixner #else
202506d8308cSThomas Gleixner static inline void wake_up_idle_cpu(int cpu) { }
202606d8308cSThomas Gleixner #endif
202706d8308cSThomas Gleixner 
202821805085SPeter Zijlstra extern unsigned int sysctl_sched_latency;
2029b2be5e96SPeter Zijlstra extern unsigned int sysctl_sched_min_granularity;
2030bf0f6f24SIngo Molnar extern unsigned int sysctl_sched_wakeup_granularity;
2031bf0f6f24SIngo Molnar extern unsigned int sysctl_sched_child_runs_first;
20321983a922SChristian Ehrhardt 
20331983a922SChristian Ehrhardt enum sched_tunable_scaling {
20341983a922SChristian Ehrhardt 	SCHED_TUNABLESCALING_NONE,
20351983a922SChristian Ehrhardt 	SCHED_TUNABLESCALING_LOG,
20361983a922SChristian Ehrhardt 	SCHED_TUNABLESCALING_LINEAR,
20371983a922SChristian Ehrhardt 	SCHED_TUNABLESCALING_END,
20381983a922SChristian Ehrhardt };
20391983a922SChristian Ehrhardt extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
20401983a922SChristian Ehrhardt 
20412bba22c5SMike Galbraith #ifdef CONFIG_SCHED_DEBUG
2042da84d961SIngo Molnar extern unsigned int sysctl_sched_migration_cost;
2043b82d9fddSPeter Zijlstra extern unsigned int sysctl_sched_nr_migrate;
2044e9e9250bSPeter Zijlstra extern unsigned int sysctl_sched_time_avg;
2045cd1bb94bSArun R Bharadwaj extern unsigned int sysctl_timer_migration;
2046a7a4f8a7SPaul Turner extern unsigned int sysctl_sched_shares_window;
2047b2be5e96SPeter Zijlstra 
20481983a922SChristian Ehrhardt int sched_proc_update_handler(struct ctl_table *table, int write,
20498d65af78SAlexey Dobriyan 		void __user *buffer, size_t *length,
2050b2be5e96SPeter Zijlstra 		loff_t *ppos);
20512bd8e6d4SIngo Molnar #endif
2052eea08f32SArun R Bharadwaj #ifdef CONFIG_SCHED_DEBUG
2053eea08f32SArun R Bharadwaj static inline unsigned int get_sysctl_timer_migration(void)
2054eea08f32SArun R Bharadwaj {
2055eea08f32SArun R Bharadwaj 	return sysctl_timer_migration;
2056eea08f32SArun R Bharadwaj }
2057eea08f32SArun R Bharadwaj #else
2058eea08f32SArun R Bharadwaj static inline unsigned int get_sysctl_timer_migration(void)
2059eea08f32SArun R Bharadwaj {
2060eea08f32SArun R Bharadwaj 	return 1;
2061eea08f32SArun R Bharadwaj }
2062eea08f32SArun R Bharadwaj #endif
20639f0c1e56SPeter Zijlstra extern unsigned int sysctl_sched_rt_period;
20649f0c1e56SPeter Zijlstra extern int sysctl_sched_rt_runtime;
20652bd8e6d4SIngo Molnar 
2066d0b27fa7SPeter Zijlstra int sched_rt_handler(struct ctl_table *table, int write,
20678d65af78SAlexey Dobriyan 		void __user *buffer, size_t *lenp,
2068d0b27fa7SPeter Zijlstra 		loff_t *ppos);
2069d0b27fa7SPeter Zijlstra 
20705091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
20715091faa4SMike Galbraith extern unsigned int sysctl_sched_autogroup_enabled;
20725091faa4SMike Galbraith 
20735091faa4SMike Galbraith extern void sched_autogroup_create_attach(struct task_struct *p);
20745091faa4SMike Galbraith extern void sched_autogroup_detach(struct task_struct *p);
20755091faa4SMike Galbraith extern void sched_autogroup_fork(struct signal_struct *sig);
20765091faa4SMike Galbraith extern void sched_autogroup_exit(struct signal_struct *sig);
20775091faa4SMike Galbraith #ifdef CONFIG_PROC_FS
20785091faa4SMike Galbraith extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
20792e5b5b3aSHiroshi Shimamoto extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
20805091faa4SMike Galbraith #endif
20815091faa4SMike Galbraith #else
20825091faa4SMike Galbraith static inline void sched_autogroup_create_attach(struct task_struct *p) { }
20835091faa4SMike Galbraith static inline void sched_autogroup_detach(struct task_struct *p) { }
20845091faa4SMike Galbraith static inline void sched_autogroup_fork(struct signal_struct *sig) { }
20855091faa4SMike Galbraith static inline void sched_autogroup_exit(struct signal_struct *sig) { }
20865091faa4SMike Galbraith #endif
20875091faa4SMike Galbraith 
2088ec12cb7fSPaul Turner #ifdef CONFIG_CFS_BANDWIDTH
2089ec12cb7fSPaul Turner extern unsigned int sysctl_sched_cfs_bandwidth_slice;
2090ec12cb7fSPaul Turner #endif
2091ec12cb7fSPaul Turner 
2092b29739f9SIngo Molnar #ifdef CONFIG_RT_MUTEXES
209336c8b586SIngo Molnar extern int rt_mutex_getprio(struct task_struct *p);
209436c8b586SIngo Molnar extern void rt_mutex_setprio(struct task_struct *p, int prio);
209536c8b586SIngo Molnar extern void rt_mutex_adjust_pi(struct task_struct *p);
20963c7d5184SThomas Gleixner static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
20973c7d5184SThomas Gleixner {
20983c7d5184SThomas Gleixner 	return tsk->pi_blocked_on != NULL;
20993c7d5184SThomas Gleixner }
2100b29739f9SIngo Molnar #else
2101e868171aSAlexey Dobriyan static inline int rt_mutex_getprio(struct task_struct *p)
2102b29739f9SIngo Molnar {
2103b29739f9SIngo Molnar 	return p->normal_prio;
2104b29739f9SIngo Molnar }
210595e02ca9SThomas Gleixner # define rt_mutex_adjust_pi(p)		do { } while (0)
21063c7d5184SThomas Gleixner static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
21073c7d5184SThomas Gleixner {
21083c7d5184SThomas Gleixner 	return false;
21093c7d5184SThomas Gleixner }
2110b29739f9SIngo Molnar #endif
2111b29739f9SIngo Molnar 
2112d95f4122SMike Galbraith extern bool yield_to(struct task_struct *p, bool preempt);
211336c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
211436c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
211536c8b586SIngo Molnar extern int task_nice(const struct task_struct *p);
211636c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
211736c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
21181da177e4SLinus Torvalds extern int idle_cpu(int cpu);
2119fe7de49fSKOSAKI Motohiro extern int sched_setscheduler(struct task_struct *, int,
2120fe7de49fSKOSAKI Motohiro 			      const struct sched_param *);
2121961ccdddSRusty Russell extern int sched_setscheduler_nocheck(struct task_struct *, int,
2122fe7de49fSKOSAKI Motohiro 				      const struct sched_param *);
212336c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
2124c4f30608SPaul E. McKenney /**
2125c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
2126fa757281SRandy Dunlap  * @p: the task in question.
2127c4f30608SPaul E. McKenney  */
21287061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
2129c4f30608SPaul E. McKenney {
2130c4f30608SPaul E. McKenney 	return p->pid == 0;
2131c4f30608SPaul E. McKenney }
213236c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
213336c8b586SIngo Molnar extern void set_curr_task(int cpu, struct task_struct *p);
21341da177e4SLinus Torvalds 
21351da177e4SLinus Torvalds void yield(void);
21361da177e4SLinus Torvalds 
21371da177e4SLinus Torvalds /*
21381da177e4SLinus Torvalds  * The default (Linux) execution domain.
21391da177e4SLinus Torvalds  */
21401da177e4SLinus Torvalds extern struct exec_domain	default_exec_domain;
21411da177e4SLinus Torvalds 
21421da177e4SLinus Torvalds union thread_union {
21431da177e4SLinus Torvalds 	struct thread_info thread_info;
21441da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
21451da177e4SLinus Torvalds };
21461da177e4SLinus Torvalds 
21471da177e4SLinus Torvalds #ifndef __HAVE_ARCH_KSTACK_END
21481da177e4SLinus Torvalds static inline int kstack_end(void *addr)
21491da177e4SLinus Torvalds {
21501da177e4SLinus Torvalds 	/* Reliable end of stack detection:
21511da177e4SLinus Torvalds 	 * Some APM bios versions misalign the stack
21521da177e4SLinus Torvalds 	 */
21531da177e4SLinus Torvalds 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
21541da177e4SLinus Torvalds }
21551da177e4SLinus Torvalds #endif
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds extern union thread_union init_thread_union;
21581da177e4SLinus Torvalds extern struct task_struct init_task;
21591da177e4SLinus Torvalds 
21601da177e4SLinus Torvalds extern struct   mm_struct init_mm;
21611da177e4SLinus Torvalds 
2162198fe21bSPavel Emelyanov extern struct pid_namespace init_pid_ns;
2163198fe21bSPavel Emelyanov 
2164198fe21bSPavel Emelyanov /*
2165198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
2166198fe21bSPavel Emelyanov  *
2167198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
2168198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
2169228ebcbeSPavel Emelyanov  * find_task_by_vpid():
2170228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
2171198fe21bSPavel Emelyanov  *
2172e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
2173198fe21bSPavel Emelyanov  */
2174198fe21bSPavel Emelyanov 
2175228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
2176228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2177228ebcbeSPavel Emelyanov 		struct pid_namespace *ns);
2178198fe21bSPavel Emelyanov 
21798520d7c7SOleg Nesterov extern void __set_special_pids(struct pid *pid);
21801da177e4SLinus Torvalds 
21811da177e4SLinus Torvalds /* per-UID process charging. */
2182acce292cSCedric Le Goater extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
21831da177e4SLinus Torvalds static inline struct user_struct *get_uid(struct user_struct *u)
21841da177e4SLinus Torvalds {
21851da177e4SLinus Torvalds 	atomic_inc(&u->__count);
21861da177e4SLinus Torvalds 	return u;
21871da177e4SLinus Torvalds }
21881da177e4SLinus Torvalds extern void free_uid(struct user_struct *);
218928f300d2SPavel Emelyanov extern void release_uids(struct user_namespace *ns);
21901da177e4SLinus Torvalds 
21911da177e4SLinus Torvalds #include <asm/current.h>
21921da177e4SLinus Torvalds 
2193f0af911aSTorben Hohn extern void xtime_update(unsigned long ticks);
21941da177e4SLinus Torvalds 
2195b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2196b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
21973e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
21981da177e4SLinus Torvalds #ifdef CONFIG_SMP
21991da177e4SLinus Torvalds  extern void kick_process(struct task_struct *tsk);
22001da177e4SLinus Torvalds #else
22011da177e4SLinus Torvalds  static inline void kick_process(struct task_struct *tsk) { }
22021da177e4SLinus Torvalds #endif
22033e51e3edSSamir Bellabes extern void sched_fork(struct task_struct *p);
2204ad46c2c4SIngo Molnar extern void sched_dead(struct task_struct *p);
22051da177e4SLinus Torvalds 
22061da177e4SLinus Torvalds extern void proc_caches_init(void);
22071da177e4SLinus Torvalds extern void flush_signals(struct task_struct *);
22083bcac026SDavid Howells extern void __flush_signals(struct task_struct *);
220910ab825bSOleg Nesterov extern void ignore_signals(struct task_struct *);
22101da177e4SLinus Torvalds extern void flush_signal_handlers(struct task_struct *, int force_default);
22111da177e4SLinus Torvalds extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
22121da177e4SLinus Torvalds 
22131da177e4SLinus Torvalds static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
22141da177e4SLinus Torvalds {
22151da177e4SLinus Torvalds 	unsigned long flags;
22161da177e4SLinus Torvalds 	int ret;
22171da177e4SLinus Torvalds 
22181da177e4SLinus Torvalds 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
22191da177e4SLinus Torvalds 	ret = dequeue_signal(tsk, mask, info);
22201da177e4SLinus Torvalds 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
22211da177e4SLinus Torvalds 
22221da177e4SLinus Torvalds 	return ret;
22231da177e4SLinus Torvalds }
22241da177e4SLinus Torvalds 
22251da177e4SLinus Torvalds extern void block_all_signals(int (*notifier)(void *priv), void *priv,
22261da177e4SLinus Torvalds 			      sigset_t *mask);
22271da177e4SLinus Torvalds extern void unblock_all_signals(void);
22281da177e4SLinus Torvalds extern void release_task(struct task_struct * p);
22291da177e4SLinus Torvalds extern int send_sig_info(int, struct siginfo *, struct task_struct *);
22301da177e4SLinus Torvalds extern int force_sigsegv(int, struct task_struct *);
22311da177e4SLinus Torvalds extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2232c4b92fc1SEric W. Biederman extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2233c4b92fc1SEric W. Biederman extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2234d178bc3aSSerge Hallyn extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2235d178bc3aSSerge Hallyn 				const struct cred *, u32);
2236c4b92fc1SEric W. Biederman extern int kill_pgrp(struct pid *pid, int sig, int priv);
2237c4b92fc1SEric W. Biederman extern int kill_pid(struct pid *pid, int sig, int priv);
2238c3de4b38SMatthew Wilcox extern int kill_proc_info(int, struct siginfo *, pid_t);
223986773473SOleg Nesterov extern __must_check bool do_notify_parent(struct task_struct *, int);
2240a7f0765eSOleg Nesterov extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
22411da177e4SLinus Torvalds extern void force_sig(int, struct task_struct *);
22421da177e4SLinus Torvalds extern int send_sig(int, struct task_struct *, int);
224309faef11SOleg Nesterov extern int zap_other_threads(struct task_struct *p);
22441da177e4SLinus Torvalds extern struct sigqueue *sigqueue_alloc(void);
22451da177e4SLinus Torvalds extern void sigqueue_free(struct sigqueue *);
2246ac5c2153SOleg Nesterov extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
22479ac95f2fSOleg Nesterov extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
22481da177e4SLinus Torvalds extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
22491da177e4SLinus Torvalds 
22509ec52099SCedric Le Goater static inline int kill_cad_pid(int sig, int priv)
22519ec52099SCedric Le Goater {
22529ec52099SCedric Le Goater 	return kill_pid(cad_pid, sig, priv);
22539ec52099SCedric Le Goater }
22549ec52099SCedric Le Goater 
22551da177e4SLinus Torvalds /* These can be the second arg to send_sig_info/send_group_sig_info.  */
22561da177e4SLinus Torvalds #define SEND_SIG_NOINFO ((struct siginfo *) 0)
22571da177e4SLinus Torvalds #define SEND_SIG_PRIV	((struct siginfo *) 1)
22581da177e4SLinus Torvalds #define SEND_SIG_FORCED	((struct siginfo *) 2)
22591da177e4SLinus Torvalds 
22602a855dd0SSebastian Andrzej Siewior /*
22612a855dd0SSebastian Andrzej Siewior  * True if we are on the alternate signal stack.
22622a855dd0SSebastian Andrzej Siewior  */
22631da177e4SLinus Torvalds static inline int on_sig_stack(unsigned long sp)
22641da177e4SLinus Torvalds {
22652a855dd0SSebastian Andrzej Siewior #ifdef CONFIG_STACK_GROWSUP
22662a855dd0SSebastian Andrzej Siewior 	return sp >= current->sas_ss_sp &&
22672a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp < current->sas_ss_size;
22682a855dd0SSebastian Andrzej Siewior #else
22692a855dd0SSebastian Andrzej Siewior 	return sp > current->sas_ss_sp &&
22702a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp <= current->sas_ss_size;
22712a855dd0SSebastian Andrzej Siewior #endif
22721da177e4SLinus Torvalds }
22731da177e4SLinus Torvalds 
22741da177e4SLinus Torvalds static inline int sas_ss_flags(unsigned long sp)
22751da177e4SLinus Torvalds {
22761da177e4SLinus Torvalds 	return (current->sas_ss_size == 0 ? SS_DISABLE
22771da177e4SLinus Torvalds 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
22781da177e4SLinus Torvalds }
22791da177e4SLinus Torvalds 
22801da177e4SLinus Torvalds /*
22811da177e4SLinus Torvalds  * Routines for handling mm_structs
22821da177e4SLinus Torvalds  */
22831da177e4SLinus Torvalds extern struct mm_struct * mm_alloc(void);
22841da177e4SLinus Torvalds 
22851da177e4SLinus Torvalds /* mmdrop drops the mm and the page tables */
2286b3c97528SHarvey Harrison extern void __mmdrop(struct mm_struct *);
22871da177e4SLinus Torvalds static inline void mmdrop(struct mm_struct * mm)
22881da177e4SLinus Torvalds {
22896fb43d7bSIngo Molnar 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
22901da177e4SLinus Torvalds 		__mmdrop(mm);
22911da177e4SLinus Torvalds }
22921da177e4SLinus Torvalds 
22931da177e4SLinus Torvalds /* mmput gets rid of the mappings and all user-space */
22941da177e4SLinus Torvalds extern void mmput(struct mm_struct *);
22951da177e4SLinus Torvalds /* Grab a reference to a task's mm, if it is not already going away */
22961da177e4SLinus Torvalds extern struct mm_struct *get_task_mm(struct task_struct *task);
22978cdb878dSChristopher Yeoh /*
22988cdb878dSChristopher Yeoh  * Grab a reference to a task's mm, if it is not already going away
22998cdb878dSChristopher Yeoh  * and ptrace_may_access with the mode parameter passed to it
23008cdb878dSChristopher Yeoh  * succeeds.
23018cdb878dSChristopher Yeoh  */
23028cdb878dSChristopher Yeoh extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
23031da177e4SLinus Torvalds /* Remove the current tasks stale references to the old mm_struct */
23041da177e4SLinus Torvalds extern void mm_release(struct task_struct *, struct mm_struct *);
2305402b0862SCarsten Otte /* Allocate a new mm structure and copy contents from tsk->mm */
2306402b0862SCarsten Otte extern struct mm_struct *dup_mm(struct task_struct *tsk);
23071da177e4SLinus Torvalds 
23086f2c55b8SAlexey Dobriyan extern int copy_thread(unsigned long, unsigned long, unsigned long,
23096f2c55b8SAlexey Dobriyan 			struct task_struct *, struct pt_regs *);
23101da177e4SLinus Torvalds extern void flush_thread(void);
23111da177e4SLinus Torvalds extern void exit_thread(void);
23121da177e4SLinus Torvalds 
23131da177e4SLinus Torvalds extern void exit_files(struct task_struct *);
2314a7e5328aSOleg Nesterov extern void __cleanup_sighand(struct sighand_struct *);
2315cbaffba1SOleg Nesterov 
23161da177e4SLinus Torvalds extern void exit_itimers(struct signal_struct *);
2317cbaffba1SOleg Nesterov extern void flush_itimer_signals(void);
23181da177e4SLinus Torvalds 
23199402c95fSJoe Perches extern void do_group_exit(int);
23201da177e4SLinus Torvalds 
23211da177e4SLinus Torvalds extern void daemonize(const char *, ...);
23221da177e4SLinus Torvalds extern int allow_signal(int);
23231da177e4SLinus Torvalds extern int disallow_signal(int);
23241da177e4SLinus Torvalds 
2325d7627467SDavid Howells extern int do_execve(const char *,
2326d7627467SDavid Howells 		     const char __user * const __user *,
2327d7627467SDavid Howells 		     const char __user * const __user *, struct pt_regs *);
23281da177e4SLinus Torvalds extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
232936c8b586SIngo Molnar struct task_struct *fork_idle(int);
23301da177e4SLinus Torvalds 
23311da177e4SLinus Torvalds extern void set_task_comm(struct task_struct *tsk, char *from);
233259714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
23331da177e4SLinus Torvalds 
23341da177e4SLinus Torvalds #ifdef CONFIG_SMP
2335317f3941SPeter Zijlstra void scheduler_ipi(void);
233685ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
23371da177e4SLinus Torvalds #else
2338184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
233985ba2d86SRoland McGrath static inline unsigned long wait_task_inactive(struct task_struct *p,
234085ba2d86SRoland McGrath 					       long match_state)
234185ba2d86SRoland McGrath {
234285ba2d86SRoland McGrath 	return 1;
234385ba2d86SRoland McGrath }
23441da177e4SLinus Torvalds #endif
23451da177e4SLinus Torvalds 
234605725f7eSJiri Pirko #define next_task(p) \
234705725f7eSJiri Pirko 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
23481da177e4SLinus Torvalds 
23491da177e4SLinus Torvalds #define for_each_process(p) \
23501da177e4SLinus Torvalds 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
23511da177e4SLinus Torvalds 
23525bb459bbSOleg Nesterov extern bool current_is_single_threaded(void);
2353d84f4f99SDavid Howells 
23541da177e4SLinus Torvalds /*
23551da177e4SLinus Torvalds  * Careful: do_each_thread/while_each_thread is a double loop so
23561da177e4SLinus Torvalds  *          'break' will not work as expected - use goto instead.
23571da177e4SLinus Torvalds  */
23581da177e4SLinus Torvalds #define do_each_thread(g, t) \
23591da177e4SLinus Torvalds 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
23601da177e4SLinus Torvalds 
23611da177e4SLinus Torvalds #define while_each_thread(g, t) \
23621da177e4SLinus Torvalds 	while ((t = next_thread(t)) != g)
23631da177e4SLinus Torvalds 
23647e49827cSOleg Nesterov static inline int get_nr_threads(struct task_struct *tsk)
23657e49827cSOleg Nesterov {
2366b3ac022cSOleg Nesterov 	return tsk->signal->nr_threads;
23677e49827cSOleg Nesterov }
23687e49827cSOleg Nesterov 
2369087806b1SOleg Nesterov static inline bool thread_group_leader(struct task_struct *p)
2370087806b1SOleg Nesterov {
2371087806b1SOleg Nesterov 	return p->exit_signal >= 0;
2372087806b1SOleg Nesterov }
23731da177e4SLinus Torvalds 
23740804ef4bSEric W. Biederman /* Do to the insanities of de_thread it is possible for a process
23750804ef4bSEric W. Biederman  * to have the pid of the thread group leader without actually being
23760804ef4bSEric W. Biederman  * the thread group leader.  For iteration through the pids in proc
23770804ef4bSEric W. Biederman  * all we care about is that we have a task with the appropriate
23780804ef4bSEric W. Biederman  * pid, we don't actually care if we have the right task.
23790804ef4bSEric W. Biederman  */
2380e868171aSAlexey Dobriyan static inline int has_group_leader_pid(struct task_struct *p)
23810804ef4bSEric W. Biederman {
23820804ef4bSEric W. Biederman 	return p->pid == p->tgid;
23830804ef4bSEric W. Biederman }
23840804ef4bSEric W. Biederman 
2385bac0abd6SPavel Emelyanov static inline
2386bac0abd6SPavel Emelyanov int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2387bac0abd6SPavel Emelyanov {
2388bac0abd6SPavel Emelyanov 	return p1->tgid == p2->tgid;
2389bac0abd6SPavel Emelyanov }
2390bac0abd6SPavel Emelyanov 
239136c8b586SIngo Molnar static inline struct task_struct *next_thread(const struct task_struct *p)
239247e65328SOleg Nesterov {
239305725f7eSJiri Pirko 	return list_entry_rcu(p->thread_group.next,
239436c8b586SIngo Molnar 			      struct task_struct, thread_group);
239547e65328SOleg Nesterov }
239647e65328SOleg Nesterov 
2397e868171aSAlexey Dobriyan static inline int thread_group_empty(struct task_struct *p)
23981da177e4SLinus Torvalds {
239947e65328SOleg Nesterov 	return list_empty(&p->thread_group);
24001da177e4SLinus Torvalds }
24011da177e4SLinus Torvalds 
24021da177e4SLinus Torvalds #define delay_group_leader(p) \
24031da177e4SLinus Torvalds 		(thread_group_leader(p) && !thread_group_empty(p))
24041da177e4SLinus Torvalds 
24051da177e4SLinus Torvalds /*
2406260ea101SEric W. Biederman  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
240722e2c507SJens Axboe  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2408ddbcc7e8SPaul Menage  * pins the final release of task.io_context.  Also protects ->cpuset and
2409d68b46feSOleg Nesterov  * ->cgroup.subsys[]. And ->vfork_done.
24101da177e4SLinus Torvalds  *
24111da177e4SLinus Torvalds  * Nests both inside and outside of read_lock(&tasklist_lock).
24121da177e4SLinus Torvalds  * It must not be nested with write_lock_irq(&tasklist_lock),
24131da177e4SLinus Torvalds  * neither inside nor outside.
24141da177e4SLinus Torvalds  */
24151da177e4SLinus Torvalds static inline void task_lock(struct task_struct *p)
24161da177e4SLinus Torvalds {
24171da177e4SLinus Torvalds 	spin_lock(&p->alloc_lock);
24181da177e4SLinus Torvalds }
24191da177e4SLinus Torvalds 
24201da177e4SLinus Torvalds static inline void task_unlock(struct task_struct *p)
24211da177e4SLinus Torvalds {
24221da177e4SLinus Torvalds 	spin_unlock(&p->alloc_lock);
24231da177e4SLinus Torvalds }
24241da177e4SLinus Torvalds 
2425b8ed374eSNamhyung Kim extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2426f63ee72eSOleg Nesterov 							unsigned long *flags);
2427f63ee72eSOleg Nesterov 
24289388dc30SAnton Vorontsov static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
24299388dc30SAnton Vorontsov 						       unsigned long *flags)
24309388dc30SAnton Vorontsov {
24319388dc30SAnton Vorontsov 	struct sighand_struct *ret;
24329388dc30SAnton Vorontsov 
24339388dc30SAnton Vorontsov 	ret = __lock_task_sighand(tsk, flags);
24349388dc30SAnton Vorontsov 	(void)__cond_lock(&tsk->sighand->siglock, ret);
24359388dc30SAnton Vorontsov 	return ret;
24369388dc30SAnton Vorontsov }
2437b8ed374eSNamhyung Kim 
2438f63ee72eSOleg Nesterov static inline void unlock_task_sighand(struct task_struct *tsk,
2439f63ee72eSOleg Nesterov 						unsigned long *flags)
2440f63ee72eSOleg Nesterov {
2441f63ee72eSOleg Nesterov 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2442f63ee72eSOleg Nesterov }
2443f63ee72eSOleg Nesterov 
24444714d1d3SBen Blum #ifdef CONFIG_CGROUPS
2445257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk)
24464714d1d3SBen Blum {
2447257058aeSTejun Heo 	down_read(&tsk->signal->group_rwsem);
24484714d1d3SBen Blum }
2449257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk)
24504714d1d3SBen Blum {
2451257058aeSTejun Heo 	up_read(&tsk->signal->group_rwsem);
24524714d1d3SBen Blum }
245377e4ef99STejun Heo 
245477e4ef99STejun Heo /**
245577e4ef99STejun Heo  * threadgroup_lock - lock threadgroup
245677e4ef99STejun Heo  * @tsk: member task of the threadgroup to lock
245777e4ef99STejun Heo  *
245877e4ef99STejun Heo  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
245977e4ef99STejun Heo  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
246077e4ef99STejun Heo  * perform exec.  This is useful for cases where the threadgroup needs to
246177e4ef99STejun Heo  * stay stable across blockable operations.
246277e4ef99STejun Heo  *
246377e4ef99STejun Heo  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
246477e4ef99STejun Heo  * synchronization.  While held, no new task will be added to threadgroup
246577e4ef99STejun Heo  * and no existing live task will have its PF_EXITING set.
246677e4ef99STejun Heo  *
246777e4ef99STejun Heo  * During exec, a task goes and puts its thread group through unusual
246877e4ef99STejun Heo  * changes.  After de-threading, exclusive access is assumed to resources
246977e4ef99STejun Heo  * which are usually shared by tasks in the same group - e.g. sighand may
247077e4ef99STejun Heo  * be replaced with a new one.  Also, the exec'ing task takes over group
247177e4ef99STejun Heo  * leader role including its pid.  Exclude these changes while locked by
247277e4ef99STejun Heo  * grabbing cred_guard_mutex which is used to synchronize exec path.
247377e4ef99STejun Heo  */
2474257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk)
24754714d1d3SBen Blum {
247677e4ef99STejun Heo 	/*
247777e4ef99STejun Heo 	 * exec uses exit for de-threading nesting group_rwsem inside
247877e4ef99STejun Heo 	 * cred_guard_mutex. Grab cred_guard_mutex first.
247977e4ef99STejun Heo 	 */
248077e4ef99STejun Heo 	mutex_lock(&tsk->signal->cred_guard_mutex);
2481257058aeSTejun Heo 	down_write(&tsk->signal->group_rwsem);
24824714d1d3SBen Blum }
248377e4ef99STejun Heo 
248477e4ef99STejun Heo /**
248577e4ef99STejun Heo  * threadgroup_unlock - unlock threadgroup
248677e4ef99STejun Heo  * @tsk: member task of the threadgroup to unlock
248777e4ef99STejun Heo  *
248877e4ef99STejun Heo  * Reverse threadgroup_lock().
248977e4ef99STejun Heo  */
2490257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk)
24914714d1d3SBen Blum {
2492257058aeSTejun Heo 	up_write(&tsk->signal->group_rwsem);
249377e4ef99STejun Heo 	mutex_unlock(&tsk->signal->cred_guard_mutex);
24944714d1d3SBen Blum }
24954714d1d3SBen Blum #else
2496257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2497257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk) {}
2498257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk) {}
2499257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk) {}
25004714d1d3SBen Blum #endif
25014714d1d3SBen Blum 
2502f037360fSAl Viro #ifndef __HAVE_THREAD_FUNCTIONS
2503f037360fSAl Viro 
2504f7e4217bSRoman Zippel #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2505f7e4217bSRoman Zippel #define task_stack_page(task)	((task)->stack)
2506a1261f54SAl Viro 
250710ebffdeSAl Viro static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
250810ebffdeSAl Viro {
250910ebffdeSAl Viro 	*task_thread_info(p) = *task_thread_info(org);
251010ebffdeSAl Viro 	task_thread_info(p)->task = p;
251110ebffdeSAl Viro }
251210ebffdeSAl Viro 
251310ebffdeSAl Viro static inline unsigned long *end_of_stack(struct task_struct *p)
251410ebffdeSAl Viro {
2515f7e4217bSRoman Zippel 	return (unsigned long *)(task_thread_info(p) + 1);
251610ebffdeSAl Viro }
251710ebffdeSAl Viro 
2518f037360fSAl Viro #endif
2519f037360fSAl Viro 
25208b05c7e6SFUJITA Tomonori static inline int object_is_on_stack(void *obj)
25218b05c7e6SFUJITA Tomonori {
25228b05c7e6SFUJITA Tomonori 	void *stack = task_stack_page(current);
25238b05c7e6SFUJITA Tomonori 
25248b05c7e6SFUJITA Tomonori 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
25258b05c7e6SFUJITA Tomonori }
25268b05c7e6SFUJITA Tomonori 
25278c9843e5SBenjamin Herrenschmidt extern void thread_info_cache_init(void);
25288c9843e5SBenjamin Herrenschmidt 
25297c9f8861SEric Sandeen #ifdef CONFIG_DEBUG_STACK_USAGE
25307c9f8861SEric Sandeen static inline unsigned long stack_not_used(struct task_struct *p)
25317c9f8861SEric Sandeen {
25327c9f8861SEric Sandeen 	unsigned long *n = end_of_stack(p);
25337c9f8861SEric Sandeen 
25347c9f8861SEric Sandeen 	do { 	/* Skip over canary */
25357c9f8861SEric Sandeen 		n++;
25367c9f8861SEric Sandeen 	} while (!*n);
25377c9f8861SEric Sandeen 
25387c9f8861SEric Sandeen 	return (unsigned long)n - (unsigned long)end_of_stack(p);
25397c9f8861SEric Sandeen }
25407c9f8861SEric Sandeen #endif
25417c9f8861SEric Sandeen 
25421da177e4SLinus Torvalds /* set thread flags in other task's structures
25431da177e4SLinus Torvalds  * - see asm/thread_info.h for TIF_xxxx flags available
25441da177e4SLinus Torvalds  */
25451da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
25461da177e4SLinus Torvalds {
2547a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
25481da177e4SLinus Torvalds }
25491da177e4SLinus Torvalds 
25501da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25511da177e4SLinus Torvalds {
2552a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
25531da177e4SLinus Torvalds }
25541da177e4SLinus Torvalds 
25551da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
25561da177e4SLinus Torvalds {
2557a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
25581da177e4SLinus Torvalds }
25591da177e4SLinus Torvalds 
25601da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25611da177e4SLinus Torvalds {
2562a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
25631da177e4SLinus Torvalds }
25641da177e4SLinus Torvalds 
25651da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
25661da177e4SLinus Torvalds {
2567a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
25681da177e4SLinus Torvalds }
25691da177e4SLinus Torvalds 
25701da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
25711da177e4SLinus Torvalds {
25721da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25731da177e4SLinus Torvalds }
25741da177e4SLinus Torvalds 
25751da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
25761da177e4SLinus Torvalds {
25771da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25781da177e4SLinus Torvalds }
25791da177e4SLinus Torvalds 
25808ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
25818ae121acSGregory Haskins {
25828ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
25838ae121acSGregory Haskins }
25848ae121acSGregory Haskins 
2585690cc3ffSEric W. Biederman static inline int restart_syscall(void)
2586690cc3ffSEric W. Biederman {
2587690cc3ffSEric W. Biederman 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2588690cc3ffSEric W. Biederman 	return -ERESTARTNOINTR;
2589690cc3ffSEric W. Biederman }
2590690cc3ffSEric W. Biederman 
25911da177e4SLinus Torvalds static inline int signal_pending(struct task_struct *p)
25921da177e4SLinus Torvalds {
25931da177e4SLinus Torvalds 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
25941da177e4SLinus Torvalds }
25951da177e4SLinus Torvalds 
2596d9588725SRoland McGrath static inline int __fatal_signal_pending(struct task_struct *p)
2597d9588725SRoland McGrath {
2598d9588725SRoland McGrath 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2599d9588725SRoland McGrath }
2600f776d12dSMatthew Wilcox 
2601f776d12dSMatthew Wilcox static inline int fatal_signal_pending(struct task_struct *p)
2602f776d12dSMatthew Wilcox {
2603f776d12dSMatthew Wilcox 	return signal_pending(p) && __fatal_signal_pending(p);
2604f776d12dSMatthew Wilcox }
2605f776d12dSMatthew Wilcox 
260616882c1eSOleg Nesterov static inline int signal_pending_state(long state, struct task_struct *p)
260716882c1eSOleg Nesterov {
260816882c1eSOleg Nesterov 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
260916882c1eSOleg Nesterov 		return 0;
261016882c1eSOleg Nesterov 	if (!signal_pending(p))
261116882c1eSOleg Nesterov 		return 0;
261216882c1eSOleg Nesterov 
261316882c1eSOleg Nesterov 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
261416882c1eSOleg Nesterov }
261516882c1eSOleg Nesterov 
26161da177e4SLinus Torvalds static inline int need_resched(void)
26171da177e4SLinus Torvalds {
26189404ef02SLinus Torvalds 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
26191da177e4SLinus Torvalds }
26201da177e4SLinus Torvalds 
26211da177e4SLinus Torvalds /*
26221da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
26231da177e4SLinus Torvalds  * explicit rescheduling in places that are safe. The return
26241da177e4SLinus Torvalds  * value indicates whether a reschedule was done in fact.
26251da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling,
26261da177e4SLinus Torvalds  * cond_resched_softirq() will enable bhs before scheduling.
26271da177e4SLinus Torvalds  */
2628c3921ab7SLinus Torvalds extern int _cond_resched(void);
26296f80bd98SFrederic Weisbecker 
2630613afbf8SFrederic Weisbecker #define cond_resched() ({			\
2631613afbf8SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, 0);	\
2632613afbf8SFrederic Weisbecker 	_cond_resched();			\
2633613afbf8SFrederic Weisbecker })
26346f80bd98SFrederic Weisbecker 
2635613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
2636613afbf8SFrederic Weisbecker 
2637bdd4e85dSFrederic Weisbecker #ifdef CONFIG_PREEMPT_COUNT
2638716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
263902b67cc3SHerbert Xu #else
2640716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	0
264102b67cc3SHerbert Xu #endif
2642716a4234SFrederic Weisbecker 
2643613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
2644716a4234SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2645613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
2646613afbf8SFrederic Weisbecker })
2647613afbf8SFrederic Weisbecker 
2648613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
2649613afbf8SFrederic Weisbecker 
2650613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
265175e1056fSVenkatesh Pallipadi 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2652613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
2653613afbf8SFrederic Weisbecker })
26541da177e4SLinus Torvalds 
26551da177e4SLinus Torvalds /*
26561da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
265795c354feSNick Piggin  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
265895c354feSNick Piggin  * but a general need for low latency)
26591da177e4SLinus Torvalds  */
266095c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
26611da177e4SLinus Torvalds {
266295c354feSNick Piggin #ifdef CONFIG_PREEMPT
266395c354feSNick Piggin 	return spin_is_contended(lock);
266495c354feSNick Piggin #else
26651da177e4SLinus Torvalds 	return 0;
266695c354feSNick Piggin #endif
26671da177e4SLinus Torvalds }
26681da177e4SLinus Torvalds 
26697bb44adeSRoland McGrath /*
2670f06febc9SFrank Mayhar  * Thread group CPU time accounting.
2671f06febc9SFrank Mayhar  */
26724cd4c1b4SPeter Zijlstra void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
26734da94d49SPeter Zijlstra void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2674f06febc9SFrank Mayhar 
2675f06febc9SFrank Mayhar static inline void thread_group_cputime_init(struct signal_struct *sig)
2676f06febc9SFrank Mayhar {
2677ee30a7b2SThomas Gleixner 	raw_spin_lock_init(&sig->cputimer.lock);
2678f06febc9SFrank Mayhar }
2679f06febc9SFrank Mayhar 
2680f06febc9SFrank Mayhar /*
26817bb44adeSRoland McGrath  * Reevaluate whether the task has signals pending delivery.
26827bb44adeSRoland McGrath  * Wake the task if so.
26837bb44adeSRoland McGrath  * This is required every time the blocked sigset_t changes.
26847bb44adeSRoland McGrath  * callers must hold sighand->siglock.
26857bb44adeSRoland McGrath  */
26867bb44adeSRoland McGrath extern void recalc_sigpending_and_wake(struct task_struct *t);
26871da177e4SLinus Torvalds extern void recalc_sigpending(void);
26881da177e4SLinus Torvalds 
26891da177e4SLinus Torvalds extern void signal_wake_up(struct task_struct *t, int resume_stopped);
26901da177e4SLinus Torvalds 
26911da177e4SLinus Torvalds /*
26921da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
26931da177e4SLinus Torvalds  */
26941da177e4SLinus Torvalds #ifdef CONFIG_SMP
26951da177e4SLinus Torvalds 
26961da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
26971da177e4SLinus Torvalds {
2698a1261f54SAl Viro 	return task_thread_info(p)->cpu;
26991da177e4SLinus Torvalds }
27001da177e4SLinus Torvalds 
2701c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
27021da177e4SLinus Torvalds 
27031da177e4SLinus Torvalds #else
27041da177e4SLinus Torvalds 
27051da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
27061da177e4SLinus Torvalds {
27071da177e4SLinus Torvalds 	return 0;
27081da177e4SLinus Torvalds }
27091da177e4SLinus Torvalds 
27101da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
27111da177e4SLinus Torvalds {
27121da177e4SLinus Torvalds }
27131da177e4SLinus Torvalds 
27141da177e4SLinus Torvalds #endif /* CONFIG_SMP */
27151da177e4SLinus Torvalds 
271696f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
271796f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
27185c45bf27SSiddha, Suresh B 
27191da177e4SLinus Torvalds extern void normalize_rt_tasks(void);
27201da177e4SLinus Torvalds 
27217c941438SDhaval Giani #ifdef CONFIG_CGROUP_SCHED
27229b5b7751SSrivatsa Vaddagiri 
272307e06b01SYong Zhang extern struct task_group root_task_group;
27249b5b7751SSrivatsa Vaddagiri 
2725ec7dc8acSDhaval Giani extern struct task_group *sched_create_group(struct task_group *parent);
27264cf86d77SIngo Molnar extern void sched_destroy_group(struct task_group *tg);
27279b5b7751SSrivatsa Vaddagiri extern void sched_move_task(struct task_struct *tsk);
2728052f1dc7SPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
27294cf86d77SIngo Molnar extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
27305cb350baSDhaval Giani extern unsigned long sched_group_shares(struct task_group *tg);
2731052f1dc7SPeter Zijlstra #endif
2732052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
27339f0c1e56SPeter Zijlstra extern int sched_group_set_rt_runtime(struct task_group *tg,
27349f0c1e56SPeter Zijlstra 				      long rt_runtime_us);
27359f0c1e56SPeter Zijlstra extern long sched_group_rt_runtime(struct task_group *tg);
2736d0b27fa7SPeter Zijlstra extern int sched_group_set_rt_period(struct task_group *tg,
2737d0b27fa7SPeter Zijlstra 				      long rt_period_us);
2738d0b27fa7SPeter Zijlstra extern long sched_group_rt_period(struct task_group *tg);
273954e99124SDhaval Giani extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2740052f1dc7SPeter Zijlstra #endif
27419b5b7751SSrivatsa Vaddagiri #endif
27429b5b7751SSrivatsa Vaddagiri 
274354e99124SDhaval Giani extern int task_can_switch_user(struct user_struct *up,
274454e99124SDhaval Giani 					struct task_struct *tsk);
274554e99124SDhaval Giani 
27464b98d11bSAlexey Dobriyan #ifdef CONFIG_TASK_XACCT
27474b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
27484b98d11bSAlexey Dobriyan {
2749940389b8SAndrea Righi 	tsk->ioac.rchar += amt;
27504b98d11bSAlexey Dobriyan }
27514b98d11bSAlexey Dobriyan 
27524b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
27534b98d11bSAlexey Dobriyan {
2754940389b8SAndrea Righi 	tsk->ioac.wchar += amt;
27554b98d11bSAlexey Dobriyan }
27564b98d11bSAlexey Dobriyan 
27574b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
27584b98d11bSAlexey Dobriyan {
2759940389b8SAndrea Righi 	tsk->ioac.syscr++;
27604b98d11bSAlexey Dobriyan }
27614b98d11bSAlexey Dobriyan 
27624b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
27634b98d11bSAlexey Dobriyan {
2764940389b8SAndrea Righi 	tsk->ioac.syscw++;
27654b98d11bSAlexey Dobriyan }
27664b98d11bSAlexey Dobriyan #else
27674b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
27684b98d11bSAlexey Dobriyan {
27694b98d11bSAlexey Dobriyan }
27704b98d11bSAlexey Dobriyan 
27714b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
27724b98d11bSAlexey Dobriyan {
27734b98d11bSAlexey Dobriyan }
27744b98d11bSAlexey Dobriyan 
27754b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
27764b98d11bSAlexey Dobriyan {
27774b98d11bSAlexey Dobriyan }
27784b98d11bSAlexey Dobriyan 
27794b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
27804b98d11bSAlexey Dobriyan {
27814b98d11bSAlexey Dobriyan }
27824b98d11bSAlexey Dobriyan #endif
27834b98d11bSAlexey Dobriyan 
278482455257SDave Hansen #ifndef TASK_SIZE_OF
278582455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
278682455257SDave Hansen #endif
278782455257SDave Hansen 
2788cf475ad2SBalbir Singh #ifdef CONFIG_MM_OWNER
2789cf475ad2SBalbir Singh extern void mm_update_next_owner(struct mm_struct *mm);
2790cf475ad2SBalbir Singh extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2791cf475ad2SBalbir Singh #else
2792cf475ad2SBalbir Singh static inline void mm_update_next_owner(struct mm_struct *mm)
2793cf475ad2SBalbir Singh {
2794cf475ad2SBalbir Singh }
2795cf475ad2SBalbir Singh 
2796cf475ad2SBalbir Singh static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2797cf475ad2SBalbir Singh {
2798cf475ad2SBalbir Singh }
2799cf475ad2SBalbir Singh #endif /* CONFIG_MM_OWNER */
2800cf475ad2SBalbir Singh 
28013e10e716SJiri Slaby static inline unsigned long task_rlimit(const struct task_struct *tsk,
28023e10e716SJiri Slaby 		unsigned int limit)
28033e10e716SJiri Slaby {
28043e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
28053e10e716SJiri Slaby }
28063e10e716SJiri Slaby 
28073e10e716SJiri Slaby static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
28083e10e716SJiri Slaby 		unsigned int limit)
28093e10e716SJiri Slaby {
28103e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
28113e10e716SJiri Slaby }
28123e10e716SJiri Slaby 
28133e10e716SJiri Slaby static inline unsigned long rlimit(unsigned int limit)
28143e10e716SJiri Slaby {
28153e10e716SJiri Slaby 	return task_rlimit(current, limit);
28163e10e716SJiri Slaby }
28173e10e716SJiri Slaby 
28183e10e716SJiri Slaby static inline unsigned long rlimit_max(unsigned int limit)
28193e10e716SJiri Slaby {
28203e10e716SJiri Slaby 	return task_rlimit_max(current, limit);
28213e10e716SJiri Slaby }
28223e10e716SJiri Slaby 
28231da177e4SLinus Torvalds #endif /* __KERNEL__ */
28241da177e4SLinus Torvalds 
28251da177e4SLinus Torvalds #endif
2826