/* xref: /linux/include/linux/sched.h (revision ee761f629d598579594d7e1eb8c552f3c5f71e4d) */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>


struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

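/*
 * Illustrative sketch (not part of the original header): once per
 * LOAD_FREQ interval each load average decays towards the current
 * number of contributing tasks, scaled by FIXED_1.  nr_active() is a
 * hypothetical stand-in for the caller's bookkeeping; the real update
 * lives in the scheduler core:
 *
 *	unsigned long active = nr_active() * FIXED_1;
 *
 *	CALC_LOAD(avenrun[0], EXP_1, active);
 *	CALC_LOAD(avenrun[1], EXP_5, active);
 *	CALC_LOAD(avenrun[2], EXP_15, active);
 */
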
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

/* Notifier for when a task gets migrated to a new CPU */
struct task_migration_notifier {
	struct task_struct *task;
	int from_cpu;
	int to_cpu;
};
extern void register_task_migration_notifier(struct notifier_block *n);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_STATE_MAX		512

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"

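/*
 * Compile-time check: the array size below turns negative (a build
 * error) unless TASK_STATE_TO_CHAR_STR supplies exactly one character
 * per state bit, i.e. its length equals ilog2(TASK_STATE_MAX) + 1.
 */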
extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task)	((task)->exit_state != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state().
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

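/*
 * A fuller version of the canonical sleep pattern, as a sketch (real
 * code would normally reach for wait_event() and friends from
 * <linux/wait.h> rather than open-coding this):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition_is_true())
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * condition_is_true() is a hypothetical predicate; the point is that
 * the state is set *before* the condition is tested, so a concurrent
 * wakeup between the test and schedule() is not lost.
 */
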
/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL
 * for the current task), SP is the stack pointer of the first frame that
 * should be shown in the backtrace (or NULL if the entire call-chain of
 * the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);

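/*
 * Example (sketch): sleep for up to one second in TASK_INTERRUPTIBLE,
 * getting back the number of jiffies remaining if woken early:
 *
 *	long remaining = schedule_timeout_interruptible(HZ);
 *
 * Passing MAX_SCHEDULE_TIMEOUT sleeps indefinitely until woken.
 */
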
struct nsproxy;
struct user_namespace;

#include <linux/aio.h>

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

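/*
 * These are bit numbers within mm->flags, so they are manipulated with
 * the ordinary bitops.  For example (sketch), a core-dump path might
 * test a filter bit roughly like:
 *
 *	if (test_bit(MMF_DUMP_ANON_PRIVATE, &mm->flags))
 *		... include private anonymous mappings in the dump ...
 *
 * (The real consumers build a mask from mm->flags; see the coredump
 * code under fs/ for the authoritative usage.)
 */
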
struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 *
 * Gathers a generic snapshot of user and system time.
 */
struct cputime {
	cputime_t utime;
	cputime_t stime;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This is an extension of struct cputime that includes the total runtime
 * spent by the task from the scheduler point of view.
 *
 * As a result, this structure groups together three kinds of CPU time
 * that are tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

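/*
 * With these aliases, a task_cputime used as an expiration cache can be
 * read as e.g. cputime_expires.prof_exp: the earliest CPUCLOCK_PROF
 * expiry shares storage with what is otherwise the stime field.
 */
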
#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	raw_spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	struct cputime prev_cputime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * group_rwsem prevents new tasks from entering the threadgroup and
	 * member tasks from exiting, more specifically, setting of
	 * PF_EXITING.  fork and exit paths are protected with this rwsem
	 * using threadgroup_change_begin/end().  Users which require
	 * threadgroup to remain stable should use threadgroup_[un]lock()
	 * which also takes care of exec path.  Currently, cgroup is the
	 * only user.
	 */
	struct rw_semaphore group_rwsem;
#endif

	oom_flags_t oom_flags;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system...
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	kuid_t uid;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

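/*
 * Example (sketch): a nice-0 task has a load weight of 1024.  With the
 * increased resolution enabled, scale_load(1024) stores 1024 << 10
 * internally, and scale_load_down() converts back before the value is
 * exposed through user-visible interfaces; with the resolution disabled
 * (as above), both are identity operations.
 */
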
/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */

extern int __weak arch_sd_sibiling_asym_packing(void);

struct sched_group_power {
	atomic_t ref;
	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int power, power_orig;
	unsigned long next_update;
	/*
	 * Number of busy cpus in this group.
	 */
	atomic_t nr_busy_cpus;

	unsigned long cpumask[0]; /* iteration mask */
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_power *sgp;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

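/*
 * Because the cpumask fields above are zero-length arrays, these
 * structures are allocated with trailing space for the boot-time
 * cpumask, roughly (a sketch of the idea, not the exact call site):
 *
 *	sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
 */
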
static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

/*
 * cpumask masking which cpus in the group are allowed to iterate up the
 * domain tree.
 */
static inline struct cpumask *sched_group_mask(struct sched_group *sg)
{
	return to_cpumask(sg->sgp->cpumask);
}

/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

962758b2cdcSRusty Russell static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
963758b2cdcSRusty Russell {
9646c99e9adSRusty Russell 	return to_cpumask(sd->span);
965758b2cdcSRusty Russell }
966758b2cdcSRusty Russell 
967acc3f5d7SRusty Russell extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
9681d3504fcSHidetoshi Seto 				    struct sched_domain_attr *dattr_new);
969029190c5SPaul Jackson 
970acc3f5d7SRusty Russell /* Allocate an array of sched domains, for partition_sched_domains(). */
971acc3f5d7SRusty Russell cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
972acc3f5d7SRusty Russell void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
973acc3f5d7SRusty Russell 
97406aaf76aSIngo Molnar /* Test a flag in parent sched domain */
97506aaf76aSIngo Molnar static inline int test_sd_parent(struct sched_domain *sd, int flag)
97606aaf76aSIngo Molnar {
97706aaf76aSIngo Molnar 	if (sd->parent && (sd->parent->flags & flag))
97806aaf76aSIngo Molnar 		return 1;
97906aaf76aSIngo Molnar 
98006aaf76aSIngo Molnar 	return 0;
98106aaf76aSIngo Molnar }
9821da177e4SLinus Torvalds 
98347fe38fcSPeter Zijlstra unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
98447fe38fcSPeter Zijlstra unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
98547fe38fcSPeter Zijlstra 
98639be3501SPeter Zijlstra bool cpus_share_cache(int this_cpu, int that_cpu);
98739be3501SPeter Zijlstra 
9881b427c15SIngo Molnar #else /* CONFIG_SMP */
9891da177e4SLinus Torvalds 
9901b427c15SIngo Molnar struct sched_domain_attr;
9911b427c15SIngo Molnar 
9921b427c15SIngo Molnar static inline void
993acc3f5d7SRusty Russell partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
9941b427c15SIngo Molnar 			struct sched_domain_attr *dattr_new)
995d02c7a8cSCon Kolivas {
996d02c7a8cSCon Kolivas }
99739be3501SPeter Zijlstra 
99839be3501SPeter Zijlstra static inline bool cpus_share_cache(int this_cpu, int that_cpu)
99939be3501SPeter Zijlstra {
100039be3501SPeter Zijlstra 	return true;
100139be3501SPeter Zijlstra }
100239be3501SPeter Zijlstra 
10031b427c15SIngo Molnar #endif	/* !CONFIG_SMP */
10041da177e4SLinus Torvalds 
100547fe38fcSPeter Zijlstra 
10061da177e4SLinus Torvalds struct io_context;			/* See blkdev.h */
10071da177e4SLinus Torvalds 
10081da177e4SLinus Torvalds 
1009383f2835SChen, Kenneth W #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
101036c8b586SIngo Molnar extern void prefetch_stack(struct task_struct *t);
1011383f2835SChen, Kenneth W #else
1012383f2835SChen, Kenneth W static inline void prefetch_stack(struct task_struct *t) { }
1013383f2835SChen, Kenneth W #endif
10141da177e4SLinus Torvalds 
10151da177e4SLinus Torvalds struct audit_context;		/* See audit.c */
10161da177e4SLinus Torvalds struct mempolicy;
1017b92ce558SJens Axboe struct pipe_inode_info;
10184865ecf1SSerge E. Hallyn struct uts_namespace;
10191da177e4SLinus Torvalds 
102020b8a59fSIngo Molnar struct rq;
102120b8a59fSIngo Molnar struct sched_domain;
102220b8a59fSIngo Molnar 
10237d478721SPeter Zijlstra /*
10247d478721SPeter Zijlstra  * wake flags
10257d478721SPeter Zijlstra  */
10267d478721SPeter Zijlstra #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1027a7558e01SPeter Zijlstra #define WF_FORK		0x02		/* child wakeup after fork */
1028f339b9dcSPeter Zijlstra #define WF_MIGRATED	0x04		/* internal use, task got migrated */
10297d478721SPeter Zijlstra 
1030371fd7e7SPeter Zijlstra #define ENQUEUE_WAKEUP		1
103174f8e4b2SPeter Zijlstra #define ENQUEUE_HEAD		2
103274f8e4b2SPeter Zijlstra #ifdef CONFIG_SMP
103374f8e4b2SPeter Zijlstra #define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
103474f8e4b2SPeter Zijlstra #else
103574f8e4b2SPeter Zijlstra #define ENQUEUE_WAKING		0
103674f8e4b2SPeter Zijlstra #endif
1037371fd7e7SPeter Zijlstra 
1038371fd7e7SPeter Zijlstra #define DEQUEUE_SLEEP		1
1039371fd7e7SPeter Zijlstra 
104020b8a59fSIngo Molnar struct sched_class {
10415522d5d5SIngo Molnar 	const struct sched_class *next;
104220b8a59fSIngo Molnar 
1043371fd7e7SPeter Zijlstra 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1044371fd7e7SPeter Zijlstra 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
10454530d7abSDmitry Adamushko 	void (*yield_task) (struct rq *rq);
1046d95f4122SMike Galbraith 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
104720b8a59fSIngo Molnar 
10487d478721SPeter Zijlstra 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
104920b8a59fSIngo Molnar 
1050fb8d4724SIngo Molnar 	struct task_struct * (*pick_next_task) (struct rq *rq);
105131ee529cSIngo Molnar 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
105220b8a59fSIngo Molnar 
1053681f3e68SPeter Williams #ifdef CONFIG_SMP
10547608dec2SPeter Zijlstra 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
10550a74bef8SPaul Turner 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
10564ce72a2cSLi Zefan 
10579a897c5aSSteven Rostedt 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
10589a897c5aSSteven Rostedt 	void (*post_schedule) (struct rq *this_rq);
105974f8e4b2SPeter Zijlstra 	void (*task_waking) (struct task_struct *task);
1060efbbd05aSPeter Zijlstra 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1061e1d1484fSPeter Williams 
1062cd8ba7cdSMike Travis 	void (*set_cpus_allowed)(struct task_struct *p,
106396f874e2SRusty Russell 				 const struct cpumask *newmask);
106457d885feSGregory Haskins 
10651f11eb6aSGregory Haskins 	void (*rq_online)(struct rq *rq);
10661f11eb6aSGregory Haskins 	void (*rq_offline)(struct rq *rq);
10674ce72a2cSLi Zefan #endif
10684ce72a2cSLi Zefan 
10694ce72a2cSLi Zefan 	void (*set_curr_task) (struct rq *rq);
10704ce72a2cSLi Zefan 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1071cd29fe6fSPeter Zijlstra 	void (*task_fork) (struct task_struct *p);
1072cb469845SSteven Rostedt 
1073da7a735eSPeter Zijlstra 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1074da7a735eSPeter Zijlstra 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1075cb469845SSteven Rostedt 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1076da7a735eSPeter Zijlstra 			     int oldprio);
1077810b3817SPeter Zijlstra 
1078dba091b9SThomas Gleixner 	unsigned int (*get_rr_interval) (struct rq *rq,
1079dba091b9SThomas Gleixner 					 struct task_struct *task);
10800d721ceaSPeter Williams 
1081810b3817SPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
1082b2b5ce02SPeter Zijlstra 	void (*task_move_group) (struct task_struct *p, int on_rq);
1083810b3817SPeter Zijlstra #endif
108420b8a59fSIngo Molnar };
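/*
 * Illustrative sketch (hypothetical, not a real scheduling class): a minimal
 * sched_class instance using designated initializers.  The dummy_* handlers
 * are assumed to be defined elsewhere; hooks not listed are left NULL.
 *
 *	static const struct sched_class dummy_sched_class = {
 *		.next			= NULL,
 *		.enqueue_task		= dummy_enqueue_task,
 *		.dequeue_task		= dummy_dequeue_task,
 *		.yield_task		= dummy_yield_task,
 *		.check_preempt_curr	= dummy_check_preempt_curr,
 *		.pick_next_task		= dummy_pick_next_task,
 *		.put_prev_task		= dummy_put_prev_task,
 *		.set_curr_task		= dummy_set_curr_task,
 *		.task_tick		= dummy_task_tick,
 *	};
 *
 * The core scheduler walks classes through ->next in priority order and asks
 * each in turn to pick the next task to run.
 */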
108520b8a59fSIngo Molnar 
108620b8a59fSIngo Molnar struct load_weight {
108720b8a59fSIngo Molnar 	unsigned long weight, inv_weight;
108820b8a59fSIngo Molnar };
108920b8a59fSIngo Molnar 
10909d85f21cSPaul Turner struct sched_avg {
10919d85f21cSPaul Turner 	/*
10929d85f21cSPaul Turner 	 * These sums represent an infinite geometric series and so are bounded
10939d85f21cSPaul Turner 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
10949d85f21cSPaul Turner 	 * choices of y < 1-2^(-32)*1024.
10959d85f21cSPaul Turner 	 */
10969d85f21cSPaul Turner 	u32 runnable_avg_sum, runnable_avg_period;
10979d85f21cSPaul Turner 	u64 last_runnable_update;
10989ee474f5SPaul Turner 	s64 decay_count;
10992dac754eSPaul Turner 	unsigned long load_avg_contrib;
11009d85f21cSPaul Turner };
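/*
 * Illustrative arithmetic (an assumption about the fair-class implementation,
 * not stated in this header): per-entity load tracking typically picks y so
 * that y^32 = 1/2, i.e. contributions halve every 32 periods.  Then
 *
 *	y ~= 0.9786,  1024 / (1 - y) ~= 47.8k
 *
 * so the bound quoted above fits comfortably in a u32.
 */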
11019d85f21cSPaul Turner 
110294c18227SIngo Molnar #ifdef CONFIG_SCHEDSTATS
110341acab88SLucas De Marchi struct sched_statistics {
110494c18227SIngo Molnar 	u64			wait_start;
110594c18227SIngo Molnar 	u64			wait_max;
11066d082592SArjan van de Ven 	u64			wait_count;
11076d082592SArjan van de Ven 	u64			wait_sum;
11088f0dfc34SArjan van de Ven 	u64			iowait_count;
11098f0dfc34SArjan van de Ven 	u64			iowait_sum;
111094c18227SIngo Molnar 
111194c18227SIngo Molnar 	u64			sleep_start;
111220b8a59fSIngo Molnar 	u64			sleep_max;
111394c18227SIngo Molnar 	s64			sum_sleep_runtime;
111494c18227SIngo Molnar 
111594c18227SIngo Molnar 	u64			block_start;
111620b8a59fSIngo Molnar 	u64			block_max;
111720b8a59fSIngo Molnar 	u64			exec_max;
1118eba1ed4bSIngo Molnar 	u64			slice_max;
1119cc367732SIngo Molnar 
1120cc367732SIngo Molnar 	u64			nr_migrations_cold;
1121cc367732SIngo Molnar 	u64			nr_failed_migrations_affine;
1122cc367732SIngo Molnar 	u64			nr_failed_migrations_running;
1123cc367732SIngo Molnar 	u64			nr_failed_migrations_hot;
1124cc367732SIngo Molnar 	u64			nr_forced_migrations;
1125cc367732SIngo Molnar 
1126cc367732SIngo Molnar 	u64			nr_wakeups;
1127cc367732SIngo Molnar 	u64			nr_wakeups_sync;
1128cc367732SIngo Molnar 	u64			nr_wakeups_migrate;
1129cc367732SIngo Molnar 	u64			nr_wakeups_local;
1130cc367732SIngo Molnar 	u64			nr_wakeups_remote;
1131cc367732SIngo Molnar 	u64			nr_wakeups_affine;
1132cc367732SIngo Molnar 	u64			nr_wakeups_affine_attempts;
1133cc367732SIngo Molnar 	u64			nr_wakeups_passive;
1134cc367732SIngo Molnar 	u64			nr_wakeups_idle;
113541acab88SLucas De Marchi };
113641acab88SLucas De Marchi #endif
113741acab88SLucas De Marchi 
113841acab88SLucas De Marchi struct sched_entity {
113941acab88SLucas De Marchi 	struct load_weight	load;		/* for load-balancing */
114041acab88SLucas De Marchi 	struct rb_node		run_node;
114141acab88SLucas De Marchi 	struct list_head	group_node;
114241acab88SLucas De Marchi 	unsigned int		on_rq;
114341acab88SLucas De Marchi 
114441acab88SLucas De Marchi 	u64			exec_start;
114541acab88SLucas De Marchi 	u64			sum_exec_runtime;
114641acab88SLucas De Marchi 	u64			vruntime;
114741acab88SLucas De Marchi 	u64			prev_sum_exec_runtime;
114841acab88SLucas De Marchi 
114941acab88SLucas De Marchi 	u64			nr_migrations;
115041acab88SLucas De Marchi 
115141acab88SLucas De Marchi #ifdef CONFIG_SCHEDSTATS
115241acab88SLucas De Marchi 	struct sched_statistics statistics;
115394c18227SIngo Molnar #endif
115494c18227SIngo Molnar 
115520b8a59fSIngo Molnar #ifdef CONFIG_FAIR_GROUP_SCHED
115620b8a59fSIngo Molnar 	struct sched_entity	*parent;
115720b8a59fSIngo Molnar 	/* rq on which this entity is (to be) queued: */
115820b8a59fSIngo Molnar 	struct cfs_rq		*cfs_rq;
115920b8a59fSIngo Molnar 	/* rq "owned" by this entity/group: */
116020b8a59fSIngo Molnar 	struct cfs_rq		*my_q;
116120b8a59fSIngo Molnar #endif
11628bd75c77SClark Williams 
1163f4e26b12SPaul Turner /*
1164f4e26b12SPaul Turner  * Load tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below
1165f4e26b12SPaul Turner  * may be removed once per-entity load tracking is useful for purposes beyond
1166f4e26b12SPaul Turner  * shares distribution (e.g. load balancing).
1167f4e26b12SPaul Turner  */
1168f4e26b12SPaul Turner #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
1169f4e26b12SPaul Turner 	/* Per-entity load-tracking */
11709d85f21cSPaul Turner 	struct sched_avg	avg;
11719d85f21cSPaul Turner #endif
117220b8a59fSIngo Molnar };
117370b97a7fSIngo Molnar 
1174fa717060SPeter Zijlstra struct sched_rt_entity {
1175fa717060SPeter Zijlstra 	struct list_head run_list;
117678f2c7dbSPeter Zijlstra 	unsigned long timeout;
117757d2aa00SYing Xue 	unsigned long watchdog_stamp;
1178bee367edSRichard Kennedy 	unsigned int time_slice;
11796f505b16SPeter Zijlstra 
118058d6c2d7SPeter Zijlstra 	struct sched_rt_entity *back;
1181052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
11826f505b16SPeter Zijlstra 	struct sched_rt_entity	*parent;
11836f505b16SPeter Zijlstra 	/* rq on which this entity is (to be) queued: */
11846f505b16SPeter Zijlstra 	struct rt_rq		*rt_rq;
11856f505b16SPeter Zijlstra 	/* rq "owned" by this entity/group: */
11866f505b16SPeter Zijlstra 	struct rt_rq		*my_q;
11876f505b16SPeter Zijlstra #endif
1188fa717060SPeter Zijlstra };
1189fa717060SPeter Zijlstra 
11908bd75c77SClark Williams 
119186848966SPaul E. McKenney struct rcu_node;
119286848966SPaul E. McKenney 
11938dc85d54SPeter Zijlstra enum perf_event_task_context {
11948dc85d54SPeter Zijlstra 	perf_invalid_context = -1,
11958dc85d54SPeter Zijlstra 	perf_hw_context = 0,
119689a1e187SPeter Zijlstra 	perf_sw_context,
11978dc85d54SPeter Zijlstra 	perf_nr_task_contexts,
11988dc85d54SPeter Zijlstra };
11998dc85d54SPeter Zijlstra 
12001da177e4SLinus Torvalds struct task_struct {
12011da177e4SLinus Torvalds 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1202f7e4217bSRoman Zippel 	void *stack;
12031da177e4SLinus Torvalds 	atomic_t usage;
120497dc32cdSWilliam Cohen 	unsigned int flags;	/* per process flags, defined below */
120597dc32cdSWilliam Cohen 	unsigned int ptrace;
12061da177e4SLinus Torvalds 
12072dd73a4fSPeter Williams #ifdef CONFIG_SMP
1208fa14ff4aSPeter Zijlstra 	struct llist_node wake_entry;
12093ca7a440SPeter Zijlstra 	int on_cpu;
12104866cde0SNick Piggin #endif
1211fd2f4419SPeter Zijlstra 	int on_rq;
121250e645a8SIngo Molnar 
1213b29739f9SIngo Molnar 	int prio, static_prio, normal_prio;
1214c7aceabaSRichard Kennedy 	unsigned int rt_priority;
12155522d5d5SIngo Molnar 	const struct sched_class *sched_class;
121620b8a59fSIngo Molnar 	struct sched_entity se;
1217fa717060SPeter Zijlstra 	struct sched_rt_entity rt;
12188323f26cSPeter Zijlstra #ifdef CONFIG_CGROUP_SCHED
12198323f26cSPeter Zijlstra 	struct task_group *sched_task_group;
12208323f26cSPeter Zijlstra #endif
12211da177e4SLinus Torvalds 
1222e107be36SAvi Kivity #ifdef CONFIG_PREEMPT_NOTIFIERS
1223e107be36SAvi Kivity 	/* list of struct preempt_notifier: */
1224e107be36SAvi Kivity 	struct hlist_head preempt_notifiers;
1225e107be36SAvi Kivity #endif
1226e107be36SAvi Kivity 
122718796aa0SAlexey Dobriyan 	/*
122818796aa0SAlexey Dobriyan 	 * fpu_counter contains the number of consecutive context switches
122918796aa0SAlexey Dobriyan 	 * during which the FPU is used. If this is over a threshold, the lazy
123018796aa0SAlexey Dobriyan 	 * FPU saving becomes unlazy to avoid the trap. This is an unsigned char
123118796aa0SAlexey Dobriyan 	 * so that after 256 times the counter wraps and the behavior turns
123218796aa0SAlexey Dobriyan 	 * lazy again; this is to deal with bursty apps that only use the FPU
123318796aa0SAlexey Dobriyan 	 * for a short time.
123418796aa0SAlexey Dobriyan 	 */
123518796aa0SAlexey Dobriyan 	unsigned char fpu_counter;
12366c5c9341SAlexey Dobriyan #ifdef CONFIG_BLK_DEV_IO_TRACE
12372056a782SJens Axboe 	unsigned int btrace_seq;
12386c5c9341SAlexey Dobriyan #endif
12391da177e4SLinus Torvalds 
124097dc32cdSWilliam Cohen 	unsigned int policy;
124129baa747SPeter Zijlstra 	int nr_cpus_allowed;
12421da177e4SLinus Torvalds 	cpumask_t cpus_allowed;
12431da177e4SLinus Torvalds 
1244a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1245e260be67SPaul E. McKenney 	int rcu_read_lock_nesting;
1246f41d911fSPaul E. McKenney 	char rcu_read_unlock_special;
1247f41d911fSPaul E. McKenney 	struct list_head rcu_node_entry;
1248a57eb940SPaul E. McKenney #endif /* #ifdef CONFIG_PREEMPT_RCU */
1249a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1250a57eb940SPaul E. McKenney 	struct rcu_node *rcu_blocked_node;
1251f41d911fSPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
125224278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
125324278d14SPaul E. McKenney 	struct rt_mutex *rcu_boost_mutex;
125424278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1255e260be67SPaul E. McKenney 
125652f17b6cSChandra Seetharaman #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
12571da177e4SLinus Torvalds 	struct sched_info sched_info;
12581da177e4SLinus Torvalds #endif
12591da177e4SLinus Torvalds 
12601da177e4SLinus Torvalds 	struct list_head tasks;
1261806c09a7SDario Faggioli #ifdef CONFIG_SMP
1262917b627dSGregory Haskins 	struct plist_node pushable_tasks;
1263806c09a7SDario Faggioli #endif
12641da177e4SLinus Torvalds 
12651da177e4SLinus Torvalds 	struct mm_struct *mm, *active_mm;
12664471a675SJiri Kosina #ifdef CONFIG_COMPAT_BRK
12674471a675SJiri Kosina 	unsigned brk_randomized:1;
12684471a675SJiri Kosina #endif
126934e55232SKAMEZAWA Hiroyuki #if defined(SPLIT_RSS_COUNTING)
127034e55232SKAMEZAWA Hiroyuki 	struct task_rss_stat	rss_stat;
127134e55232SKAMEZAWA Hiroyuki #endif
12721da177e4SLinus Torvalds /* task state */
127397dc32cdSWilliam Cohen 	int exit_state;
12741da177e4SLinus Torvalds 	int exit_code, exit_signal;
12751da177e4SLinus Torvalds 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1276a8f072c1STejun Heo 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
12771da177e4SLinus Torvalds 	/* ??? */
127897dc32cdSWilliam Cohen 	unsigned int personality;
12791da177e4SLinus Torvalds 	unsigned did_exec:1;
1280f9ce1f1cSKentaro Takeda 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1281f9ce1f1cSKentaro Takeda 				 * execve */
12828f0dfc34SArjan van de Ven 	unsigned in_iowait:1;
12838f0dfc34SArjan van de Ven 
1284259e5e6cSAndy Lutomirski 	/* task may not gain privileges */
1285259e5e6cSAndy Lutomirski 	unsigned no_new_privs:1;
1286ca94c442SLennart Poettering 
1287ca94c442SLennart Poettering 	/* Revert to default priority/policy when forking */
1288ca94c442SLennart Poettering 	unsigned sched_reset_on_fork:1;
1289a8e4f2eaSPeter Zijlstra 	unsigned sched_contributes_to_load:1;
1290ca94c442SLennart Poettering 
12911da177e4SLinus Torvalds 	pid_t pid;
12921da177e4SLinus Torvalds 	pid_t tgid;
12930a425405SArjan van de Ven 
12941314562aSHiroshi Shimamoto #ifdef CONFIG_CC_STACKPROTECTOR
12950a425405SArjan van de Ven 	/* Canary value for the -fstack-protector gcc feature */
12960a425405SArjan van de Ven 	unsigned long stack_canary;
12971314562aSHiroshi Shimamoto #endif
12981da177e4SLinus Torvalds 	/*
12991da177e4SLinus Torvalds 	 * pointers to (original) parent process, youngest child, younger sibling,
13001da177e4SLinus Torvalds 	 * older sibling, respectively.  (p->father can be replaced with
1301f470021aSRoland McGrath 	 * p->real_parent->pid)
13021da177e4SLinus Torvalds 	 */
1303abd63bc3SKees Cook 	struct task_struct __rcu *real_parent; /* real parent process */
1304abd63bc3SKees Cook 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
13051da177e4SLinus Torvalds 	/*
1306f470021aSRoland McGrath 	 * children/sibling forms the list of my natural children
13071da177e4SLinus Torvalds 	 */
13081da177e4SLinus Torvalds 	struct list_head children;	/* list of my children */
13091da177e4SLinus Torvalds 	struct list_head sibling;	/* linkage in my parent's children list */
13101da177e4SLinus Torvalds 	struct task_struct *group_leader;	/* threadgroup leader */
13111da177e4SLinus Torvalds 
1312f470021aSRoland McGrath 	/*
1313f470021aSRoland McGrath 	 * ptraced is the list of tasks this task is using ptrace on.
1314f470021aSRoland McGrath 	 * This includes both natural children and PTRACE_ATTACH targets.
1315f470021aSRoland McGrath 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1316f470021aSRoland McGrath 	 */
1317f470021aSRoland McGrath 	struct list_head ptraced;
1318f470021aSRoland McGrath 	struct list_head ptrace_entry;
1319f470021aSRoland McGrath 
13201da177e4SLinus Torvalds 	/* PID/PID hash table linkage. */
132192476d7fSEric W. Biederman 	struct pid_link pids[PIDTYPE_MAX];
132247e65328SOleg Nesterov 	struct list_head thread_group;
13231da177e4SLinus Torvalds 
13241da177e4SLinus Torvalds 	struct completion *vfork_done;		/* for vfork() */
13251da177e4SLinus Torvalds 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
13261da177e4SLinus Torvalds 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
13271da177e4SLinus Torvalds 
1328c66f08beSMichael Neuling 	cputime_t utime, stime, utimescaled, stimescaled;
13299ac52315SLaurent Vivier 	cputime_t gtime;
1330d99ca3b9SHidetoshi Seto #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1331d37f761dSFrederic Weisbecker 	struct cputime prev_cputime;
1332d99ca3b9SHidetoshi Seto #endif
13336a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
13346a61671bSFrederic Weisbecker 	seqlock_t vtime_seqlock;
13356a61671bSFrederic Weisbecker 	unsigned long long vtime_snap;
13366a61671bSFrederic Weisbecker 	enum {
13376a61671bSFrederic Weisbecker 		VTIME_SLEEPING = 0,
13386a61671bSFrederic Weisbecker 		VTIME_USER,
13396a61671bSFrederic Weisbecker 		VTIME_SYS,
13406a61671bSFrederic Weisbecker 	} vtime_snap_whence;
13416a61671bSFrederic Weisbecker #endif
13421da177e4SLinus Torvalds 	unsigned long nvcsw, nivcsw; /* context switch counts */
1343924b42d5STomas Janousek 	struct timespec start_time; 		/* monotonic time */
1344924b42d5STomas Janousek 	struct timespec real_start_time;	/* boot based time */
13451da177e4SLinus Torvalds /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
13461da177e4SLinus Torvalds 	unsigned long min_flt, maj_flt;
13471da177e4SLinus Torvalds 
1348f06febc9SFrank Mayhar 	struct task_cputime cputime_expires;
13491da177e4SLinus Torvalds 	struct list_head cpu_timers[3];
13501da177e4SLinus Torvalds 
13511da177e4SLinus Torvalds /* process credentials */
13521b0ba1c9SArnd Bergmann 	const struct cred __rcu *real_cred; /* objective and real subjective task
13533b11a1deSDavid Howells 					 * credentials (COW) */
13541b0ba1c9SArnd Bergmann 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
13553b11a1deSDavid Howells 					 * credentials (COW) */
135636772092SPaolo 'Blaisorblade' Giarrusso 	char comm[TASK_COMM_LEN]; /* executable name excluding path
135736772092SPaolo 'Blaisorblade' Giarrusso 				     - access with [gs]et_task_comm (which lock
135836772092SPaolo 'Blaisorblade' Giarrusso 				       it with task_lock())
1359221af7f8SLinus Torvalds 				     - initialized normally by setup_new_exec */
13601da177e4SLinus Torvalds /* file system info */
13611da177e4SLinus Torvalds 	int link_count, total_link_count;
13623d5b6fccSAlexey Dobriyan #ifdef CONFIG_SYSVIPC
13631da177e4SLinus Torvalds /* ipc stuff */
13641da177e4SLinus Torvalds 	struct sysv_sem sysvsem;
13653d5b6fccSAlexey Dobriyan #endif
1366e162b39aSMandeep Singh Baines #ifdef CONFIG_DETECT_HUNG_TASK
136782a1fcb9SIngo Molnar /* hung task detection */
136882a1fcb9SIngo Molnar 	unsigned long last_switch_count;
136982a1fcb9SIngo Molnar #endif
13701da177e4SLinus Torvalds /* CPU-specific state of this task */
13711da177e4SLinus Torvalds 	struct thread_struct thread;
13721da177e4SLinus Torvalds /* filesystem information */
13731da177e4SLinus Torvalds 	struct fs_struct *fs;
13741da177e4SLinus Torvalds /* open file information */
13751da177e4SLinus Torvalds 	struct files_struct *files;
13761651e14eSSerge E. Hallyn /* namespaces */
1377ab516013SSerge E. Hallyn 	struct nsproxy *nsproxy;
13781da177e4SLinus Torvalds /* signal handlers */
13791da177e4SLinus Torvalds 	struct signal_struct *signal;
13801da177e4SLinus Torvalds 	struct sighand_struct *sighand;
13811da177e4SLinus Torvalds 
13821da177e4SLinus Torvalds 	sigset_t blocked, real_blocked;
1383f3de272bSRoland McGrath 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
13841da177e4SLinus Torvalds 	struct sigpending pending;
13851da177e4SLinus Torvalds 
13861da177e4SLinus Torvalds 	unsigned long sas_ss_sp;
13871da177e4SLinus Torvalds 	size_t sas_ss_size;
13881da177e4SLinus Torvalds 	int (*notifier)(void *priv);
13891da177e4SLinus Torvalds 	void *notifier_data;
13901da177e4SLinus Torvalds 	sigset_t *notifier_mask;
139167d12145SAl Viro 	struct callback_head *task_works;
1392e73f8959SOleg Nesterov 
13931da177e4SLinus Torvalds 	struct audit_context *audit_context;
1394bfef93a5SAl Viro #ifdef CONFIG_AUDITSYSCALL
1395e1760bd5SEric W. Biederman 	kuid_t loginuid;
13964746ec5bSEric Paris 	unsigned int sessionid;
1397bfef93a5SAl Viro #endif
1398932ecebbSWill Drewry 	struct seccomp seccomp;
13991da177e4SLinus Torvalds 
14001da177e4SLinus Torvalds /* Thread group tracking */
14011da177e4SLinus Torvalds    	u32 parent_exec_id;
14021da177e4SLinus Torvalds    	u32 self_exec_id;
140358568d2aSMiao Xie /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
140458568d2aSMiao Xie  * mempolicy */
14051da177e4SLinus Torvalds 	spinlock_t alloc_lock;
14061da177e4SLinus Torvalds 
1407b29739f9SIngo Molnar 	/* Protection of the PI data structures: */
14081d615482SThomas Gleixner 	raw_spinlock_t pi_lock;
1409b29739f9SIngo Molnar 
141023f78d4aSIngo Molnar #ifdef CONFIG_RT_MUTEXES
141123f78d4aSIngo Molnar 	/* PI waiters blocked on a rt_mutex held by this task */
141223f78d4aSIngo Molnar 	struct plist_head pi_waiters;
141323f78d4aSIngo Molnar 	/* Deadlock detection and priority inheritance handling */
141423f78d4aSIngo Molnar 	struct rt_mutex_waiter *pi_blocked_on;
141523f78d4aSIngo Molnar #endif
141623f78d4aSIngo Molnar 
1417408894eeSIngo Molnar #ifdef CONFIG_DEBUG_MUTEXES
1418408894eeSIngo Molnar 	/* mutex deadlock detection */
1419408894eeSIngo Molnar 	struct mutex_waiter *blocked_on;
1420408894eeSIngo Molnar #endif
1421de30a2b3SIngo Molnar #ifdef CONFIG_TRACE_IRQFLAGS
1422de30a2b3SIngo Molnar 	unsigned int irq_events;
1423de30a2b3SIngo Molnar 	unsigned long hardirq_enable_ip;
1424de30a2b3SIngo Molnar 	unsigned long hardirq_disable_ip;
1425fa1452e8SHiroshi Shimamoto 	unsigned int hardirq_enable_event;
1426de30a2b3SIngo Molnar 	unsigned int hardirq_disable_event;
1427fa1452e8SHiroshi Shimamoto 	int hardirqs_enabled;
1428de30a2b3SIngo Molnar 	int hardirq_context;
1429fa1452e8SHiroshi Shimamoto 	unsigned long softirq_disable_ip;
1430fa1452e8SHiroshi Shimamoto 	unsigned long softirq_enable_ip;
1431fa1452e8SHiroshi Shimamoto 	unsigned int softirq_disable_event;
1432fa1452e8SHiroshi Shimamoto 	unsigned int softirq_enable_event;
1433fa1452e8SHiroshi Shimamoto 	int softirqs_enabled;
1434de30a2b3SIngo Molnar 	int softirq_context;
1435de30a2b3SIngo Molnar #endif
1436fbb9ce95SIngo Molnar #ifdef CONFIG_LOCKDEP
1437bdb9441eSPeter Zijlstra # define MAX_LOCK_DEPTH 48UL
1438fbb9ce95SIngo Molnar 	u64 curr_chain_key;
1439fbb9ce95SIngo Molnar 	int lockdep_depth;
1440fbb9ce95SIngo Molnar 	unsigned int lockdep_recursion;
1441c7aceabaSRichard Kennedy 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1442cf40bd16SNick Piggin 	gfp_t lockdep_reclaim_gfp;
1443fbb9ce95SIngo Molnar #endif
1444408894eeSIngo Molnar 
14451da177e4SLinus Torvalds /* journalling filesystem info */
14461da177e4SLinus Torvalds 	void *journal_info;
14471da177e4SLinus Torvalds 
1448d89d8796SNeil Brown /* stacked block device info */
1449bddd87c7SAkinobu Mita 	struct bio_list *bio_list;
1450d89d8796SNeil Brown 
145173c10101SJens Axboe #ifdef CONFIG_BLOCK
145273c10101SJens Axboe /* stack plugging */
145373c10101SJens Axboe 	struct blk_plug *plug;
145473c10101SJens Axboe #endif
145573c10101SJens Axboe 
14561da177e4SLinus Torvalds /* VM state */
14571da177e4SLinus Torvalds 	struct reclaim_state *reclaim_state;
14581da177e4SLinus Torvalds 
14591da177e4SLinus Torvalds 	struct backing_dev_info *backing_dev_info;
14601da177e4SLinus Torvalds 
14611da177e4SLinus Torvalds 	struct io_context *io_context;
14621da177e4SLinus Torvalds 
14631da177e4SLinus Torvalds 	unsigned long ptrace_message;
14641da177e4SLinus Torvalds 	siginfo_t *last_siginfo; /* For ptrace use.  */
14657c3ab738SAndrew Morton 	struct task_io_accounting ioac;
14668f0ab514SJay Lan #if defined(CONFIG_TASK_XACCT)
14671da177e4SLinus Torvalds 	u64 acct_rss_mem1;	/* accumulated rss usage */
14681da177e4SLinus Torvalds 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
146949b5cf34SJonathan Lim 	cputime_t acct_timexpd;	/* stime + utime since last update */
14701da177e4SLinus Torvalds #endif
14711da177e4SLinus Torvalds #ifdef CONFIG_CPUSETS
147258568d2aSMiao Xie 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1473cc9a6c87SMel Gorman 	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
1474825a46afSPaul Jackson 	int cpuset_mem_spread_rotor;
14756adef3ebSJack Steiner 	int cpuset_slab_spread_rotor;
14761da177e4SLinus Torvalds #endif
1477ddbcc7e8SPaul Menage #ifdef CONFIG_CGROUPS
1478817929ecSPaul Menage 	/* Control Group info protected by css_set_lock */
14792c392b8cSArnd Bergmann 	struct css_set __rcu *cgroups;
1480817929ecSPaul Menage 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1481817929ecSPaul Menage 	struct list_head cg_list;
1482ddbcc7e8SPaul Menage #endif
148342b2dd0aSAlexey Dobriyan #ifdef CONFIG_FUTEX
14840771dfefSIngo Molnar 	struct robust_list_head __user *robust_list;
148534f192c6SIngo Molnar #ifdef CONFIG_COMPAT
148634f192c6SIngo Molnar 	struct compat_robust_list_head __user *compat_robust_list;
148734f192c6SIngo Molnar #endif
1488c87e2837SIngo Molnar 	struct list_head pi_state_list;
1489c87e2837SIngo Molnar 	struct futex_pi_state *pi_state_cache;
149042b2dd0aSAlexey Dobriyan #endif
1491cdd6c482SIngo Molnar #ifdef CONFIG_PERF_EVENTS
14928dc85d54SPeter Zijlstra 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1493cdd6c482SIngo Molnar 	struct mutex perf_event_mutex;
1494cdd6c482SIngo Molnar 	struct list_head perf_event_list;
1495a63eaf34SPaul Mackerras #endif
1496c7aceabaSRichard Kennedy #ifdef CONFIG_NUMA
149758568d2aSMiao Xie 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1498c7aceabaSRichard Kennedy 	short il_next;
1499207205a2SEric Dumazet 	short pref_node_fork;
1500c7aceabaSRichard Kennedy #endif
1501cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
1502cbee9f88SPeter Zijlstra 	int numa_scan_seq;
1503cbee9f88SPeter Zijlstra 	int numa_migrate_seq;
1504cbee9f88SPeter Zijlstra 	unsigned int numa_scan_period;
1505cbee9f88SPeter Zijlstra 	u64 node_stamp;			/* migration stamp  */
1506cbee9f88SPeter Zijlstra 	struct callback_head numa_work;
1507cbee9f88SPeter Zijlstra #endif /* CONFIG_NUMA_BALANCING */
1508cbee9f88SPeter Zijlstra 
1509e56d0903SIngo Molnar 	struct rcu_head rcu;
1510b92ce558SJens Axboe 
1511b92ce558SJens Axboe 	/*
1512b92ce558SJens Axboe 	 * cache last used pipe for splice
1513b92ce558SJens Axboe 	 */
1514b92ce558SJens Axboe 	struct pipe_inode_info *splice_pipe;
15155640f768SEric Dumazet 
15165640f768SEric Dumazet 	struct page_frag task_frag;
15175640f768SEric Dumazet 
1518ca74e92bSShailabh Nagar #ifdef	CONFIG_TASK_DELAY_ACCT
1519ca74e92bSShailabh Nagar 	struct task_delay_info *delays;
1520ca74e92bSShailabh Nagar #endif
1521f4f154fdSAkinobu Mita #ifdef CONFIG_FAULT_INJECTION
1522f4f154fdSAkinobu Mita 	int make_it_fail;
1523f4f154fdSAkinobu Mita #endif
15249d823e8fSWu Fengguang 	/*
15259d823e8fSWu Fengguang 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
15269d823e8fSWu Fengguang 	 * balance_dirty_pages() for some dirty throttling pause
15279d823e8fSWu Fengguang 	 */
15289d823e8fSWu Fengguang 	int nr_dirtied;
15299d823e8fSWu Fengguang 	int nr_dirtied_pause;
153083712358SWu Fengguang 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
15319d823e8fSWu Fengguang 
15329745512cSArjan van de Ven #ifdef CONFIG_LATENCYTOP
15339745512cSArjan van de Ven 	int latency_record_count;
15349745512cSArjan van de Ven 	struct latency_record latency_record[LT_SAVECOUNT];
15359745512cSArjan van de Ven #endif
15366976675dSArjan van de Ven 	/*
15376976675dSArjan van de Ven 	 * time slack values; these are used to round up poll() and
15386976675dSArjan van de Ven 	 * select() etc timeout values. These are in nanoseconds.
15396976675dSArjan van de Ven 	 */
15406976675dSArjan van de Ven 	unsigned long timer_slack_ns;
15416976675dSArjan van de Ven 	unsigned long default_timer_slack_ns;
1542f8d570a4SDavid Miller 
1543fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
15443ad2f3fbSDaniel Mack 	/* Index of current stored address in ret_stack */
1545f201ae23SFrederic Weisbecker 	int curr_ret_stack;
1546f201ae23SFrederic Weisbecker 	/* Stack of return addresses for return function tracing */
1547f201ae23SFrederic Weisbecker 	struct ftrace_ret_stack	*ret_stack;
15488aef2d28SSteven Rostedt 	/* time stamp for last schedule */
15498aef2d28SSteven Rostedt 	unsigned long long ftrace_timestamp;
1550f201ae23SFrederic Weisbecker 	/*
1551f201ae23SFrederic Weisbecker 	 * Number of functions that haven't been traced
1552f201ae23SFrederic Weisbecker 	 * because of depth overrun.
1553f201ae23SFrederic Weisbecker 	 */
1554f201ae23SFrederic Weisbecker 	atomic_t trace_overrun;
1555380c4b14SFrederic Weisbecker 	/* Pause for the tracing */
1556380c4b14SFrederic Weisbecker 	atomic_t tracing_graph_pause;
1557f201ae23SFrederic Weisbecker #endif
1558ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1559ea4e2bc4SSteven Rostedt 	/* state flags for use by tracers */
1560ea4e2bc4SSteven Rostedt 	unsigned long trace;
1561b1cff0adSSteven Rostedt 	/* bitmask and counter of trace recursion */
1562261842b7SSteven Rostedt 	unsigned long trace_recursion;
1563261842b7SSteven Rostedt #endif /* CONFIG_TRACING */
1564c255a458SAndrew Morton #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
1565569b846dSKAMEZAWA Hiroyuki 	struct memcg_batch_info {
1566569b846dSKAMEZAWA Hiroyuki 		int do_batch;	/* incremented when batch uncharge started */
1567569b846dSKAMEZAWA Hiroyuki 		struct mem_cgroup *memcg; /* target memcg of uncharge */
15687ffd4ca7SJohannes Weiner 		unsigned long nr_pages;	/* uncharged usage */
15697ffd4ca7SJohannes Weiner 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1570569b846dSKAMEZAWA Hiroyuki 	} memcg_batch;
15710e9d92f2SGlauber Costa 	unsigned int memcg_kmem_skip_account;
1572569b846dSKAMEZAWA Hiroyuki #endif
1573bf26c018SFrederic Weisbecker #ifdef CONFIG_HAVE_HW_BREAKPOINT
1574bf26c018SFrederic Weisbecker 	atomic_t ptrace_bp_refcnt;
1575bf26c018SFrederic Weisbecker #endif
15760326f5a9SSrikar Dronamraju #ifdef CONFIG_UPROBES
15770326f5a9SSrikar Dronamraju 	struct uprobe_task *utask;
15780326f5a9SSrikar Dronamraju #endif
15791da177e4SLinus Torvalds };
15801da177e4SLinus Torvalds 
158176e6eee0SRusty Russell /* Future-safe accessor for struct task_struct's cpus_allowed. */
1582a4636818SRusty Russell #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
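/*
 * Illustrative sketch: iterating a task's allowed CPUs through the accessor
 * rather than touching ->cpus_allowed directly (consider_cpu() is a
 * hypothetical helper).
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, tsk_cpus_allowed(p))
 *		consider_cpu(cpu);
 */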
158376e6eee0SRusty Russell 
1584cbee9f88SPeter Zijlstra #ifdef CONFIG_NUMA_BALANCING
1585b8593bfdSMel Gorman extern void task_numa_fault(int node, int pages, bool migrated);
15861a687c2eSMel Gorman extern void set_numabalancing_state(bool enabled);
1587cbee9f88SPeter Zijlstra #else
1588b8593bfdSMel Gorman static inline void task_numa_fault(int node, int pages, bool migrated)
1589cbee9f88SPeter Zijlstra {
1590cbee9f88SPeter Zijlstra }
15911a687c2eSMel Gorman static inline void set_numabalancing_state(bool enabled)
15921a687c2eSMel Gorman {
15931a687c2eSMel Gorman }
1594cbee9f88SPeter Zijlstra #endif
1595cbee9f88SPeter Zijlstra 
1596e868171aSAlexey Dobriyan static inline struct pid *task_pid(struct task_struct *task)
159722c935f4SEric W. Biederman {
159822c935f4SEric W. Biederman 	return task->pids[PIDTYPE_PID].pid;
159922c935f4SEric W. Biederman }
160022c935f4SEric W. Biederman 
1601e868171aSAlexey Dobriyan static inline struct pid *task_tgid(struct task_struct *task)
160222c935f4SEric W. Biederman {
160322c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PID].pid;
160422c935f4SEric W. Biederman }
160522c935f4SEric W. Biederman 
16066dda81f4SOleg Nesterov /*
16076dda81f4SOleg Nesterov  * Without tasklist or rcu lock it is not safe to dereference
16086dda81f4SOleg Nesterov  * the result of task_pgrp/task_session even if task == current,
16096dda81f4SOleg Nesterov  * we can race with another thread doing sys_setsid/sys_setpgid.
16106dda81f4SOleg Nesterov  */
1611e868171aSAlexey Dobriyan static inline struct pid *task_pgrp(struct task_struct *task)
161222c935f4SEric W. Biederman {
161322c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_PGID].pid;
161422c935f4SEric W. Biederman }
161522c935f4SEric W. Biederman 
1616e868171aSAlexey Dobriyan static inline struct pid *task_session(struct task_struct *task)
161722c935f4SEric W. Biederman {
161822c935f4SEric W. Biederman 	return task->group_leader->pids[PIDTYPE_SID].pid;
161922c935f4SEric W. Biederman }
162022c935f4SEric W. Biederman 
16217af57294SPavel Emelyanov struct pid_namespace;
16227af57294SPavel Emelyanov 
16237af57294SPavel Emelyanov /*
16247af57294SPavel Emelyanov  * the helpers to get the task's different pids as they are seen
16257af57294SPavel Emelyanov  * from various namespaces
16267af57294SPavel Emelyanov  *
16277af57294SPavel Emelyanov  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
162844c4e1b2SEric W. Biederman  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
162944c4e1b2SEric W. Biederman  *                     current.
16307af57294SPavel Emelyanov  * task_xid_nr_ns()  : id seen from the ns specified;
16317af57294SPavel Emelyanov  *
16327af57294SPavel Emelyanov  * set_task_vxid()   : assigns a virtual id to a task;
16337af57294SPavel Emelyanov  *
16347af57294SPavel Emelyanov  * see also pid_nr() etc in include/linux/pid.h
16357af57294SPavel Emelyanov  */
163652ee2dfdSOleg Nesterov pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
163752ee2dfdSOleg Nesterov 			struct pid_namespace *ns);
16387af57294SPavel Emelyanov 
1639e868171aSAlexey Dobriyan static inline pid_t task_pid_nr(struct task_struct *tsk)
16407af57294SPavel Emelyanov {
16417af57294SPavel Emelyanov 	return tsk->pid;
16427af57294SPavel Emelyanov }
16437af57294SPavel Emelyanov 
164452ee2dfdSOleg Nesterov static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
164552ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
164652ee2dfdSOleg Nesterov {
164752ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
164852ee2dfdSOleg Nesterov }
16497af57294SPavel Emelyanov 
16507af57294SPavel Emelyanov static inline pid_t task_pid_vnr(struct task_struct *tsk)
16517af57294SPavel Emelyanov {
165252ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
16537af57294SPavel Emelyanov }
16547af57294SPavel Emelyanov 
16557af57294SPavel Emelyanov 
1656e868171aSAlexey Dobriyan static inline pid_t task_tgid_nr(struct task_struct *tsk)
16577af57294SPavel Emelyanov {
16587af57294SPavel Emelyanov 	return tsk->tgid;
16597af57294SPavel Emelyanov }
16607af57294SPavel Emelyanov 
16612f2a3a46SPavel Emelyanov pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
16627af57294SPavel Emelyanov 
16637af57294SPavel Emelyanov static inline pid_t task_tgid_vnr(struct task_struct *tsk)
16647af57294SPavel Emelyanov {
16657af57294SPavel Emelyanov 	return pid_vnr(task_tgid(tsk));
16667af57294SPavel Emelyanov }
16677af57294SPavel Emelyanov 
16687af57294SPavel Emelyanov 
166952ee2dfdSOleg Nesterov static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
167052ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
16717af57294SPavel Emelyanov {
167252ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
16737af57294SPavel Emelyanov }
16747af57294SPavel Emelyanov 
16757af57294SPavel Emelyanov static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
16767af57294SPavel Emelyanov {
167752ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
16787af57294SPavel Emelyanov }
16797af57294SPavel Emelyanov 
16807af57294SPavel Emelyanov 
168152ee2dfdSOleg Nesterov static inline pid_t task_session_nr_ns(struct task_struct *tsk,
168252ee2dfdSOleg Nesterov 					struct pid_namespace *ns)
16837af57294SPavel Emelyanov {
168452ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
16857af57294SPavel Emelyanov }
16867af57294SPavel Emelyanov 
16877af57294SPavel Emelyanov static inline pid_t task_session_vnr(struct task_struct *tsk)
16887af57294SPavel Emelyanov {
168952ee2dfdSOleg Nesterov 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
16907af57294SPavel Emelyanov }
16917af57294SPavel Emelyanov 
16921b0f7ffdSOleg Nesterov /* obsolete, do not use */
16931b0f7ffdSOleg Nesterov static inline pid_t task_pgrp_nr(struct task_struct *tsk)
16941b0f7ffdSOleg Nesterov {
16951b0f7ffdSOleg Nesterov 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
16961b0f7ffdSOleg Nesterov }
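/*
 * Illustrative sketch of the helpers above (ns is assumed to be a valid
 * struct pid_namespace obtained elsewhere):
 *
 *	pid_t gpid = task_pid_nr(p);		global id, i.e. from the init ns
 *	pid_t vpid = task_pid_vnr(p);		id as seen by current's namespace
 *	pid_t npid = task_pid_nr_ns(p, ns);	id as seen from namespace ns
 */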
16977af57294SPavel Emelyanov 
16981da177e4SLinus Torvalds /**
16991da177e4SLinus Torvalds  * pid_alive - check that a task structure is not stale
17001da177e4SLinus Torvalds  * @p: Task structure to be checked.
17011da177e4SLinus Torvalds  *
17021da177e4SLinus Torvalds  * Test if a process is not yet dead (at most zombie state).
17031da177e4SLinus Torvalds  * If pid_alive fails, then pointers within the task structure
17041da177e4SLinus Torvalds  * can be stale and must not be dereferenced.
17051da177e4SLinus Torvalds  */
1706e868171aSAlexey Dobriyan static inline int pid_alive(struct task_struct *p)
17071da177e4SLinus Torvalds {
170892476d7fSEric W. Biederman 	return p->pids[PIDTYPE_PID].pid != NULL;
17091da177e4SLinus Torvalds }
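/*
 * Illustrative sketch: pid_alive() is typically checked under RCU (or
 * tasklist_lock) before following pointers out of the task_struct; ppid is a
 * hypothetical local.
 *
 *	rcu_read_lock();
 *	if (pid_alive(p))
 *		ppid = task_pid_nr(rcu_dereference(p->real_parent));
 *	rcu_read_unlock();
 */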
17101da177e4SLinus Torvalds 
1711f400e198SSukadev Bhattiprolu /**
1712b460cbc5SSerge E. Hallyn  * is_global_init - check if a task structure is init
17133260259fSHenne  * @tsk: Task structure to be checked.
17143260259fSHenne  *
17153260259fSHenne  * Check if a task structure is the first user space task the kernel created.
1716f400e198SSukadev Bhattiprolu  */
1717e868171aSAlexey Dobriyan static inline int is_global_init(struct task_struct *tsk)
1718b461cc03SPavel Emelyanov {
1719b461cc03SPavel Emelyanov 	return tsk->pid == 1;
1720b461cc03SPavel Emelyanov }
1721b460cbc5SSerge E. Hallyn 
17229ec52099SCedric Le Goater extern struct pid *cad_pid;
17239ec52099SCedric Le Goater 
17241da177e4SLinus Torvalds extern void free_task(struct task_struct *tsk);
17251da177e4SLinus Torvalds #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1726e56d0903SIngo Molnar 
1727158d9ebdSAndrew Morton extern void __put_task_struct(struct task_struct *t);
1728e56d0903SIngo Molnar 
1729e56d0903SIngo Molnar static inline void put_task_struct(struct task_struct *t)
1730e56d0903SIngo Molnar {
1731e56d0903SIngo Molnar 	if (atomic_dec_and_test(&t->usage))
17328c7904a0SEric W. Biederman 		__put_task_struct(t);
1733e56d0903SIngo Molnar }
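/*
 * Illustrative sketch: pairing get_task_struct() with put_task_struct() so a
 * task_struct stays valid after the locks protecting the task lists are
 * dropped (some_lock and do_slow_work_on() are hypothetical).
 *
 *	get_task_struct(p);
 *	spin_unlock(&some_lock);
 *	do_slow_work_on(p);		p cannot be freed underneath us
 *	put_task_struct(p);		may free p if this was the last reference
 */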
17341da177e4SLinus Torvalds 
17356a61671bSFrederic Weisbecker #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
17366a61671bSFrederic Weisbecker extern void task_cputime(struct task_struct *t,
17376a61671bSFrederic Weisbecker 			 cputime_t *utime, cputime_t *stime);
17386a61671bSFrederic Weisbecker extern void task_cputime_scaled(struct task_struct *t,
17396a61671bSFrederic Weisbecker 				cputime_t *utimescaled, cputime_t *stimescaled);
17406a61671bSFrederic Weisbecker extern cputime_t task_gtime(struct task_struct *t);
17416a61671bSFrederic Weisbecker #else
17426fac4829SFrederic Weisbecker static inline void task_cputime(struct task_struct *t,
17436fac4829SFrederic Weisbecker 				cputime_t *utime, cputime_t *stime)
17446fac4829SFrederic Weisbecker {
17456fac4829SFrederic Weisbecker 	if (utime)
17466fac4829SFrederic Weisbecker 		*utime = t->utime;
17476fac4829SFrederic Weisbecker 	if (stime)
17486fac4829SFrederic Weisbecker 		*stime = t->stime;
17496fac4829SFrederic Weisbecker }
17506fac4829SFrederic Weisbecker 
17516fac4829SFrederic Weisbecker static inline void task_cputime_scaled(struct task_struct *t,
17526fac4829SFrederic Weisbecker 				       cputime_t *utimescaled,
17536fac4829SFrederic Weisbecker 				       cputime_t *stimescaled)
17546fac4829SFrederic Weisbecker {
17556fac4829SFrederic Weisbecker 	if (utimescaled)
17566fac4829SFrederic Weisbecker 		*utimescaled = t->utimescaled;
17576fac4829SFrederic Weisbecker 	if (stimescaled)
17586fac4829SFrederic Weisbecker 		*stimescaled = t->stimescaled;
17596fac4829SFrederic Weisbecker }
17606a61671bSFrederic Weisbecker 
17616a61671bSFrederic Weisbecker static inline cputime_t task_gtime(struct task_struct *t)
17626a61671bSFrederic Weisbecker {
17636a61671bSFrederic Weisbecker 	return t->gtime;
17646a61671bSFrederic Weisbecker }
17656a61671bSFrederic Weisbecker #endif
1766e80d0a1aSFrederic Weisbecker extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1767e80d0a1aSFrederic Weisbecker extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
176849048622SBalbir Singh 
17691da177e4SLinus Torvalds /*
17701da177e4SLinus Torvalds  * Per process flags
17711da177e4SLinus Torvalds  */
17721da177e4SLinus Torvalds #define PF_EXITING	0x00000004	/* getting shut down */
1773778e9a9cSAlexey Kuznetsov #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
177494886b84SLaurent Vivier #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
177521aa9af0STejun Heo #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
17761da177e4SLinus Torvalds #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
17774db96cf0SAndi Kleen #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
17781da177e4SLinus Torvalds #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
17791da177e4SLinus Torvalds #define PF_DUMPCORE	0x00000200	/* dumped core */
17801da177e4SLinus Torvalds #define PF_SIGNALED	0x00000400	/* killed by a signal */
17811da177e4SLinus Torvalds #define PF_MEMALLOC	0x00000800	/* Allocating memory */
178272fa5997SVasiliy Kulikov #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
17831da177e4SLinus Torvalds #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1784774a1221STejun Heo #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
17851da177e4SLinus Torvalds #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
17861da177e4SLinus Torvalds #define PF_FROZEN	0x00010000	/* frozen for system suspend */
17871da177e4SLinus Torvalds #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
17881da177e4SLinus Torvalds #define PF_KSWAPD	0x00040000	/* I am kswapd */
178921caf2fcSMing Lei #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
17901da177e4SLinus Torvalds #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1791246bb0b1SOleg Nesterov #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1792b31dc66aSJens Axboe #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1793b31dc66aSJens Axboe #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1794b31dc66aSJens Axboe #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1795b31dc66aSJens Axboe #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
17969985b0baSDavid Rientjes #define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
17974db96cf0SAndi Kleen #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1798c61afb18SPaul Jackson #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
179961a87122SThomas Gleixner #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
180058a69cb4STejun Heo #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
18011da177e4SLinus Torvalds 
18021da177e4SLinus Torvalds /*
18031da177e4SLinus Torvalds  * Only the _current_ task can read/write to tsk->flags, but other
18041da177e4SLinus Torvalds  * tasks can access tsk->flags in readonly mode, for example
18051da177e4SLinus Torvalds  * with tsk_used_math (like during threaded core dumping).
18061da177e4SLinus Torvalds  * There is however an exception to this rule during ptrace
18071da177e4SLinus Torvalds  * or during fork: the ptracer task is allowed to write to the
18081da177e4SLinus Torvalds  * child->flags of its traced child (same goes for fork, the parent
18091da177e4SLinus Torvalds  * can write to the child->flags), because we're guaranteed the
18101da177e4SLinus Torvalds  * child is not running and in turn not changing child->flags
18111da177e4SLinus Torvalds  * at the same time the parent does it.
18121da177e4SLinus Torvalds  */
18131da177e4SLinus Torvalds #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
18141da177e4SLinus Torvalds #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
18151da177e4SLinus Torvalds #define clear_used_math() clear_stopped_child_used_math(current)
18161da177e4SLinus Torvalds #define set_used_math() set_stopped_child_used_math(current)
18171da177e4SLinus Torvalds #define conditional_stopped_child_used_math(condition, child) \
18181da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
18191da177e4SLinus Torvalds #define conditional_used_math(condition) \
18201da177e4SLinus Torvalds 	conditional_stopped_child_used_math(condition, current)
18211da177e4SLinus Torvalds #define copy_to_stopped_child_used_math(child) \
18221da177e4SLinus Torvalds 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
18231da177e4SLinus Torvalds /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
18241da177e4SLinus Torvalds #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
18251da177e4SLinus Torvalds #define used_math() tsk_used_math(current)
18261da177e4SLinus Torvalds 
182721caf2fcSMing Lei /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
182821caf2fcSMing Lei static inline gfp_t memalloc_noio_flags(gfp_t flags)
182921caf2fcSMing Lei {
183021caf2fcSMing Lei 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
183121caf2fcSMing Lei 		flags &= ~__GFP_IO;
183221caf2fcSMing Lei 	return flags;
183321caf2fcSMing Lei }
183421caf2fcSMing Lei 
183521caf2fcSMing Lei static inline unsigned int memalloc_noio_save(void)
183621caf2fcSMing Lei {
183721caf2fcSMing Lei 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
183821caf2fcSMing Lei 	current->flags |= PF_MEMALLOC_NOIO;
183921caf2fcSMing Lei 	return flags;
184021caf2fcSMing Lei }
184121caf2fcSMing Lei 
184221caf2fcSMing Lei static inline void memalloc_noio_restore(unsigned int flags)
184321caf2fcSMing Lei {
184421caf2fcSMing Lei 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
184521caf2fcSMing Lei }
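/*
 * Illustrative sketch (relies on the assumption that the allocator applies
 * memalloc_noio_flags() when PF_MEMALLOC_NOIO is set, as the comment above
 * suggests): code on a block-device reclaim path brackets a region with
 * save/restore so allocations in between avoid I/O.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	struct page *page;
 *
 *	page = alloc_page(GFP_KERNEL);	treated as if __GFP_IO were cleared
 *	memalloc_noio_restore(noio_flags);
 */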
184621caf2fcSMing Lei 
1847e5c1902eSTejun Heo /*
1848a8f072c1STejun Heo  * task->jobctl flags
1849e5c1902eSTejun Heo  */
1850a8f072c1STejun Heo #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1851e5c1902eSTejun Heo 
1852a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1853a8f072c1STejun Heo #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1854a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
185573ddff2bSTejun Heo #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1856fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1857a8f072c1STejun Heo #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1858544b2c91STejun Heo #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1859a8f072c1STejun Heo 
1860a8f072c1STejun Heo #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1861a8f072c1STejun Heo #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1862a8f072c1STejun Heo #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
186373ddff2bSTejun Heo #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1864fb1d910cSTejun Heo #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1865a8f072c1STejun Heo #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1866544b2c91STejun Heo #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1867a8f072c1STejun Heo 
1868fb1d910cSTejun Heo #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
186973ddff2bSTejun Heo #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
18703759a0d9STejun Heo 
18717dd3db54STejun Heo extern bool task_set_jobctl_pending(struct task_struct *task,
18727dd3db54STejun Heo 				    unsigned int mask);
187373ddff2bSTejun Heo extern void task_clear_jobctl_trapping(struct task_struct *task);
18743759a0d9STejun Heo extern void task_clear_jobctl_pending(struct task_struct *task,
18753759a0d9STejun Heo 				      unsigned int mask);
187639efa3efSTejun Heo 
1877a57eb940SPaul E. McKenney #ifdef CONFIG_PREEMPT_RCU
1878f41d911fSPaul E. McKenney 
1879f41d911fSPaul E. McKenney #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
18801aa03f11SPaul E. McKenney #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1881f41d911fSPaul E. McKenney 
1882f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1883f41d911fSPaul E. McKenney {
1884f41d911fSPaul E. McKenney 	p->rcu_read_lock_nesting = 0;
1885f41d911fSPaul E. McKenney 	p->rcu_read_unlock_special = 0;
1886a57eb940SPaul E. McKenney #ifdef CONFIG_TREE_PREEMPT_RCU
1887dd5d19baSPaul E. McKenney 	p->rcu_blocked_node = NULL;
188824278d14SPaul E. McKenney #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
188924278d14SPaul E. McKenney #ifdef CONFIG_RCU_BOOST
189024278d14SPaul E. McKenney 	p->rcu_boost_mutex = NULL;
189124278d14SPaul E. McKenney #endif /* #ifdef CONFIG_RCU_BOOST */
1892f41d911fSPaul E. McKenney 	INIT_LIST_HEAD(&p->rcu_node_entry);
1893f41d911fSPaul E. McKenney }
1894f41d911fSPaul E. McKenney 
1895f41d911fSPaul E. McKenney #else
1896f41d911fSPaul E. McKenney 
1897f41d911fSPaul E. McKenney static inline void rcu_copy_process(struct task_struct *p)
1898f41d911fSPaul E. McKenney {
1899f41d911fSPaul E. McKenney }
1900f41d911fSPaul E. McKenney 
1901f41d911fSPaul E. McKenney #endif
1902f41d911fSPaul E. McKenney 
1903907aed48SMel Gorman static inline void tsk_restore_flags(struct task_struct *task,
1904907aed48SMel Gorman 				unsigned long orig_flags, unsigned long flags)
1905907aed48SMel Gorman {
1906907aed48SMel Gorman 	task->flags &= ~flags;
1907907aed48SMel Gorman 	task->flags |= orig_flags & flags;
1908907aed48SMel Gorman }
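/*
 * Illustrative sketch: temporarily setting a flag and restoring only that
 * bit afterwards, leaving any other changes to ->flags made in the meantime
 * intact (do_reclaim_work() is hypothetical).
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	do_reclaim_work();
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */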
1909907aed48SMel Gorman 
19101da177e4SLinus Torvalds #ifdef CONFIG_SMP
19111e1b6c51SKOSAKI Motohiro extern void do_set_cpus_allowed(struct task_struct *p,
19121e1b6c51SKOSAKI Motohiro 			       const struct cpumask *new_mask);
19131e1b6c51SKOSAKI Motohiro 
1914cd8ba7cdSMike Travis extern int set_cpus_allowed_ptr(struct task_struct *p,
191596f874e2SRusty Russell 				const struct cpumask *new_mask);
19161da177e4SLinus Torvalds #else
19171e1b6c51SKOSAKI Motohiro static inline void do_set_cpus_allowed(struct task_struct *p,
19181e1b6c51SKOSAKI Motohiro 				      const struct cpumask *new_mask)
19191e1b6c51SKOSAKI Motohiro {
19201e1b6c51SKOSAKI Motohiro }
1921cd8ba7cdSMike Travis static inline int set_cpus_allowed_ptr(struct task_struct *p,
192296f874e2SRusty Russell 				       const struct cpumask *new_mask)
19231da177e4SLinus Torvalds {
192496f874e2SRusty Russell 	if (!cpumask_test_cpu(0, new_mask))
19251da177e4SLinus Torvalds 		return -EINVAL;
19261da177e4SLinus Torvalds 	return 0;
19271da177e4SLinus Torvalds }
19281da177e4SLinus Torvalds #endif
1929e0ad9556SRusty Russell 
19305167e8d5SPeter Zijlstra #ifdef CONFIG_NO_HZ
19315167e8d5SPeter Zijlstra void calc_load_enter_idle(void);
19325167e8d5SPeter Zijlstra void calc_load_exit_idle(void);
19335167e8d5SPeter Zijlstra #else
19345167e8d5SPeter Zijlstra static inline void calc_load_enter_idle(void) { }
19355167e8d5SPeter Zijlstra static inline void calc_load_exit_idle(void) { }
19365167e8d5SPeter Zijlstra #endif /* CONFIG_NO_HZ */
19375167e8d5SPeter Zijlstra 
1938e0ad9556SRusty Russell #ifndef CONFIG_CPUMASK_OFFSTACK
1939cd8ba7cdSMike Travis static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1940cd8ba7cdSMike Travis {
1941cd8ba7cdSMike Travis 	return set_cpus_allowed_ptr(p, &new_mask);
1942cd8ba7cdSMike Travis }
1943e0ad9556SRusty Russell #endif
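/*
 * Illustrative sketch: pinning a task to a single CPU.  cpumask_of() is from
 * <linux/cpumask.h>; a negative return typically means the new mask contains
 * no online CPU the task may run on.
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(target_cpu)) < 0)
 *		pr_warn("could not pin task to CPU %d\n", target_cpu);
 */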
19441da177e4SLinus Torvalds 
1945b342501cSIngo Molnar /*
1946c676329aSPeter Zijlstra  * Do not use outside of architecture code which knows its limitations.
1947c676329aSPeter Zijlstra  *
1948c676329aSPeter Zijlstra  * sched_clock() makes no promise of monotonicity or bounded drift between
1949c676329aSPeter Zijlstra  * CPUs; using it directly (which you should not) requires disabling IRQs.
1950c676329aSPeter Zijlstra  *
1951c676329aSPeter Zijlstra  * Please use one of the three interfaces below.
1952b342501cSIngo Molnar  */
19531bbfa6f2SMike Frysinger extern unsigned long long notrace sched_clock(void);
1954c676329aSPeter Zijlstra /*
1955489a71b0SHiroshi Shimamoto  * See the comment in kernel/sched/clock.c
1956c676329aSPeter Zijlstra  */
1957c676329aSPeter Zijlstra extern u64 cpu_clock(int cpu);
1958c676329aSPeter Zijlstra extern u64 local_clock(void);
1959c676329aSPeter Zijlstra extern u64 sched_clock_cpu(int cpu);
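/*
 * Illustrative sketch: timing a short section with local_clock(), which is
 * only meaningful when both reads happen on the same CPU (do_work() is
 * hypothetical).
 *
 *	u64 t0 = local_clock();
 *
 *	do_work();
 *	elapsed_ns = local_clock() - t0;
 */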
1960c676329aSPeter Zijlstra 
1961e436d800SIngo Molnar 
1962c1955a3dSPeter Zijlstra extern void sched_clock_init(void);
1963c1955a3dSPeter Zijlstra 
19643e51f33fSPeter Zijlstra #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
19653e51f33fSPeter Zijlstra static inline void sched_clock_tick(void)
19663e51f33fSPeter Zijlstra {
19673e51f33fSPeter Zijlstra }
19683e51f33fSPeter Zijlstra 
19693e51f33fSPeter Zijlstra static inline void sched_clock_idle_sleep_event(void)
19703e51f33fSPeter Zijlstra {
19713e51f33fSPeter Zijlstra }
19723e51f33fSPeter Zijlstra 
19733e51f33fSPeter Zijlstra static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
19743e51f33fSPeter Zijlstra {
19753e51f33fSPeter Zijlstra }
19763e51f33fSPeter Zijlstra #else
1977c676329aSPeter Zijlstra /*
1978c676329aSPeter Zijlstra  * Architectures can set this to 1 if they have specified
1979c676329aSPeter Zijlstra  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1980c676329aSPeter Zijlstra  * but then during bootup it turns out that sched_clock()
1981c676329aSPeter Zijlstra  * is reliable after all:
1982c676329aSPeter Zijlstra  */
1983c676329aSPeter Zijlstra extern int sched_clock_stable;
1984c676329aSPeter Zijlstra 
19853e51f33fSPeter Zijlstra extern void sched_clock_tick(void);
19863e51f33fSPeter Zijlstra extern void sched_clock_idle_sleep_event(void);
19873e51f33fSPeter Zijlstra extern void sched_clock_idle_wakeup_event(u64 delta_ns);
19883e51f33fSPeter Zijlstra #endif
19893e51f33fSPeter Zijlstra 
1990b52bfee4SVenkatesh Pallipadi #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1991b52bfee4SVenkatesh Pallipadi /*
1992b52bfee4SVenkatesh Pallipadi  * An interface for runtime opt-in to irq time accounting based on sched_clock.
1993b52bfee4SVenkatesh Pallipadi  * The reason for this explicit opt-in is to avoid a performance penalty with
1994b52bfee4SVenkatesh Pallipadi  * slow sched_clocks.
1995b52bfee4SVenkatesh Pallipadi  */
1996b52bfee4SVenkatesh Pallipadi extern void enable_sched_clock_irqtime(void);
1997b52bfee4SVenkatesh Pallipadi extern void disable_sched_clock_irqtime(void);
1998b52bfee4SVenkatesh Pallipadi #else
1999b52bfee4SVenkatesh Pallipadi static inline void enable_sched_clock_irqtime(void) {}
2000b52bfee4SVenkatesh Pallipadi static inline void disable_sched_clock_irqtime(void) {}
2001b52bfee4SVenkatesh Pallipadi #endif
2002b52bfee4SVenkatesh Pallipadi 
200336c8b586SIngo Molnar extern unsigned long long
200441b86e9cSIngo Molnar task_sched_runtime(struct task_struct *task);
20051da177e4SLinus Torvalds 
20061da177e4SLinus Torvalds /* sched_exec is called by processes performing an exec */
20071da177e4SLinus Torvalds #ifdef CONFIG_SMP
20081da177e4SLinus Torvalds extern void sched_exec(void);
20091da177e4SLinus Torvalds #else
20101da177e4SLinus Torvalds #define sched_exec()   {}
20111da177e4SLinus Torvalds #endif
20121da177e4SLinus Torvalds 
20132aa44d05SIngo Molnar extern void sched_clock_idle_sleep_event(void);
20142aa44d05SIngo Molnar extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2015bb29ab26SIngo Molnar 
20161da177e4SLinus Torvalds #ifdef CONFIG_HOTPLUG_CPU
20171da177e4SLinus Torvalds extern void idle_task_exit(void);
20181da177e4SLinus Torvalds #else
20191da177e4SLinus Torvalds static inline void idle_task_exit(void) {}
20201da177e4SLinus Torvalds #endif
20211da177e4SLinus Torvalds 
202206d8308cSThomas Gleixner #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
202306d8308cSThomas Gleixner extern void wake_up_idle_cpu(int cpu);
202406d8308cSThomas Gleixner #else
202506d8308cSThomas Gleixner static inline void wake_up_idle_cpu(int cpu) { }
202606d8308cSThomas Gleixner #endif
202706d8308cSThomas Gleixner 
20285091faa4SMike Galbraith #ifdef CONFIG_SCHED_AUTOGROUP
20295091faa4SMike Galbraith extern void sched_autogroup_create_attach(struct task_struct *p);
20305091faa4SMike Galbraith extern void sched_autogroup_detach(struct task_struct *p);
20315091faa4SMike Galbraith extern void sched_autogroup_fork(struct signal_struct *sig);
20325091faa4SMike Galbraith extern void sched_autogroup_exit(struct signal_struct *sig);
20335091faa4SMike Galbraith #ifdef CONFIG_PROC_FS
20345091faa4SMike Galbraith extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
20352e5b5b3aSHiroshi Shimamoto extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
20365091faa4SMike Galbraith #endif
20375091faa4SMike Galbraith #else
20385091faa4SMike Galbraith static inline void sched_autogroup_create_attach(struct task_struct *p) { }
20395091faa4SMike Galbraith static inline void sched_autogroup_detach(struct task_struct *p) { }
20405091faa4SMike Galbraith static inline void sched_autogroup_fork(struct signal_struct *sig) { }
20415091faa4SMike Galbraith static inline void sched_autogroup_exit(struct signal_struct *sig) { }
20425091faa4SMike Galbraith #endif
20435091faa4SMike Galbraith 
2044d95f4122SMike Galbraith extern bool yield_to(struct task_struct *p, bool preempt);
204536c8b586SIngo Molnar extern void set_user_nice(struct task_struct *p, long nice);
204636c8b586SIngo Molnar extern int task_prio(const struct task_struct *p);
204736c8b586SIngo Molnar extern int task_nice(const struct task_struct *p);
204836c8b586SIngo Molnar extern int can_nice(const struct task_struct *p, const int nice);
204936c8b586SIngo Molnar extern int task_curr(const struct task_struct *p);
20501da177e4SLinus Torvalds extern int idle_cpu(int cpu);
2051fe7de49fSKOSAKI Motohiro extern int sched_setscheduler(struct task_struct *, int,
2052fe7de49fSKOSAKI Motohiro 			      const struct sched_param *);
2053961ccdddSRusty Russell extern int sched_setscheduler_nocheck(struct task_struct *, int,
2054fe7de49fSKOSAKI Motohiro 				      const struct sched_param *);
205536c8b586SIngo Molnar extern struct task_struct *idle_task(int cpu);
2056c4f30608SPaul E. McKenney /**
2057c4f30608SPaul E. McKenney  * is_idle_task - is the specified task an idle task?
2058fa757281SRandy Dunlap  * @p: the task in question.
2059c4f30608SPaul E. McKenney  */
20607061ca3bSPaul E. McKenney static inline bool is_idle_task(const struct task_struct *p)
2061c4f30608SPaul E. McKenney {
2062c4f30608SPaul E. McKenney 	return p->pid == 0;
2063c4f30608SPaul E. McKenney }
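/*
 * Usage sketch (assumption): callers such as RCU use this to avoid doing
 * per-task work when running in the context of a per-CPU idle task.
 *
 *	if (is_idle_task(current))
 *		return;		// skip accounting for the idle task
 */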
206436c8b586SIngo Molnar extern struct task_struct *curr_task(int cpu);
206536c8b586SIngo Molnar extern void set_curr_task(int cpu, struct task_struct *p);
20661da177e4SLinus Torvalds 
20671da177e4SLinus Torvalds void yield(void);
20681da177e4SLinus Torvalds 
20691da177e4SLinus Torvalds /*
20701da177e4SLinus Torvalds  * The default (Linux) execution domain.
20711da177e4SLinus Torvalds  */
20721da177e4SLinus Torvalds extern struct exec_domain	default_exec_domain;
20731da177e4SLinus Torvalds 
20741da177e4SLinus Torvalds union thread_union {
20751da177e4SLinus Torvalds 	struct thread_info thread_info;
20761da177e4SLinus Torvalds 	unsigned long stack[THREAD_SIZE/sizeof(long)];
20771da177e4SLinus Torvalds };
20781da177e4SLinus Torvalds 
20791da177e4SLinus Torvalds #ifndef __HAVE_ARCH_KSTACK_END
20801da177e4SLinus Torvalds static inline int kstack_end(void *addr)
20811da177e4SLinus Torvalds {
20821da177e4SLinus Torvalds 	/* Reliable end of stack detection:
20831da177e4SLinus Torvalds 	 * Some APM BIOS versions misalign the stack
20841da177e4SLinus Torvalds 	 */
20851da177e4SLinus Torvalds 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
20861da177e4SLinus Torvalds }
20871da177e4SLinus Torvalds #endif
20881da177e4SLinus Torvalds 
20891da177e4SLinus Torvalds extern union thread_union init_thread_union;
20901da177e4SLinus Torvalds extern struct task_struct init_task;
20911da177e4SLinus Torvalds 
20921da177e4SLinus Torvalds extern struct   mm_struct init_mm;
20931da177e4SLinus Torvalds 
2094198fe21bSPavel Emelyanov extern struct pid_namespace init_pid_ns;
2095198fe21bSPavel Emelyanov 
2096198fe21bSPavel Emelyanov /*
2097198fe21bSPavel Emelyanov  * find a task by one of its numerical ids
2098198fe21bSPavel Emelyanov  *
2099198fe21bSPavel Emelyanov  * find_task_by_pid_ns():
2100198fe21bSPavel Emelyanov  *      finds a task by its pid in the specified namespace
2101228ebcbeSPavel Emelyanov  * find_task_by_vpid():
2102228ebcbeSPavel Emelyanov  *      finds a task by its virtual pid
2103198fe21bSPavel Emelyanov  *
2104e49859e7SPavel Emelyanov  * see also find_vpid() etc in include/linux/pid.h
2105198fe21bSPavel Emelyanov  */
2106198fe21bSPavel Emelyanov 
2107228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_vpid(pid_t nr);
2108228ebcbeSPavel Emelyanov extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2109228ebcbeSPavel Emelyanov 		struct pid_namespace *ns);
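/*
 * Usage sketch (assumption): the lookup takes no reference on the task,
 * so callers typically hold rcu_read_lock() across it and pin the result
 * with get_task_struct() before dropping the RCU read lock.
 * example_get_task() is a hypothetical helper.
 *
 *	static struct task_struct *example_get_task(pid_t vnr)
 *	{
 *		struct task_struct *p;
 *
 *		rcu_read_lock();
 *		p = find_task_by_vpid(vnr);
 *		if (p)
 *			get_task_struct(p);
 *		rcu_read_unlock();
 *		return p;	// caller drops with put_task_struct()
 *	}
 */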
2110198fe21bSPavel Emelyanov 
21118520d7c7SOleg Nesterov extern void __set_special_pids(struct pid *pid);
21121da177e4SLinus Torvalds 
21131da177e4SLinus Torvalds /* per-UID process charging. */
21147b44ab97SEric W. Biederman extern struct user_struct * alloc_uid(kuid_t);
21151da177e4SLinus Torvalds static inline struct user_struct *get_uid(struct user_struct *u)
21161da177e4SLinus Torvalds {
21171da177e4SLinus Torvalds 	atomic_inc(&u->__count);
21181da177e4SLinus Torvalds 	return u;
21191da177e4SLinus Torvalds }
21201da177e4SLinus Torvalds extern void free_uid(struct user_struct *);
21211da177e4SLinus Torvalds 
21221da177e4SLinus Torvalds #include <asm/current.h>
21231da177e4SLinus Torvalds 
2124f0af911aSTorben Hohn extern void xtime_update(unsigned long ticks);
21251da177e4SLinus Torvalds 
2126b3c97528SHarvey Harrison extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2127b3c97528SHarvey Harrison extern int wake_up_process(struct task_struct *tsk);
21283e51e3edSSamir Bellabes extern void wake_up_new_task(struct task_struct *tsk);
21291da177e4SLinus Torvalds #ifdef CONFIG_SMP
21301da177e4SLinus Torvalds  extern void kick_process(struct task_struct *tsk);
21311da177e4SLinus Torvalds #else
21321da177e4SLinus Torvalds  static inline void kick_process(struct task_struct *tsk) { }
21331da177e4SLinus Torvalds #endif
21343e51e3edSSamir Bellabes extern void sched_fork(struct task_struct *p);
2135ad46c2c4SIngo Molnar extern void sched_dead(struct task_struct *p);
21361da177e4SLinus Torvalds 
21371da177e4SLinus Torvalds extern void proc_caches_init(void);
21381da177e4SLinus Torvalds extern void flush_signals(struct task_struct *);
21393bcac026SDavid Howells extern void __flush_signals(struct task_struct *);
214010ab825bSOleg Nesterov extern void ignore_signals(struct task_struct *);
21411da177e4SLinus Torvalds extern void flush_signal_handlers(struct task_struct *, int force_default);
21421da177e4SLinus Torvalds extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
21431da177e4SLinus Torvalds 
21441da177e4SLinus Torvalds static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
21451da177e4SLinus Torvalds {
21461da177e4SLinus Torvalds 	unsigned long flags;
21471da177e4SLinus Torvalds 	int ret;
21481da177e4SLinus Torvalds 
21491da177e4SLinus Torvalds 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
21501da177e4SLinus Torvalds 	ret = dequeue_signal(tsk, mask, info);
21511da177e4SLinus Torvalds 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
21521da177e4SLinus Torvalds 
21531da177e4SLinus Torvalds 	return ret;
21541da177e4SLinus Torvalds }
21551da177e4SLinus Torvalds 
21561da177e4SLinus Torvalds extern void block_all_signals(int (*notifier)(void *priv), void *priv,
21571da177e4SLinus Torvalds 			      sigset_t *mask);
21581da177e4SLinus Torvalds extern void unblock_all_signals(void);
21591da177e4SLinus Torvalds extern void release_task(struct task_struct * p);
21601da177e4SLinus Torvalds extern int send_sig_info(int, struct siginfo *, struct task_struct *);
21611da177e4SLinus Torvalds extern int force_sigsegv(int, struct task_struct *);
21621da177e4SLinus Torvalds extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2163c4b92fc1SEric W. Biederman extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2164c4b92fc1SEric W. Biederman extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2165d178bc3aSSerge Hallyn extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2166d178bc3aSSerge Hallyn 				const struct cred *, u32);
2167c4b92fc1SEric W. Biederman extern int kill_pgrp(struct pid *pid, int sig, int priv);
2168c4b92fc1SEric W. Biederman extern int kill_pid(struct pid *pid, int sig, int priv);
2169c3de4b38SMatthew Wilcox extern int kill_proc_info(int, struct siginfo *, pid_t);
217086773473SOleg Nesterov extern __must_check bool do_notify_parent(struct task_struct *, int);
2171a7f0765eSOleg Nesterov extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
21721da177e4SLinus Torvalds extern void force_sig(int, struct task_struct *);
21731da177e4SLinus Torvalds extern int send_sig(int, struct task_struct *, int);
217409faef11SOleg Nesterov extern int zap_other_threads(struct task_struct *p);
21751da177e4SLinus Torvalds extern struct sigqueue *sigqueue_alloc(void);
21761da177e4SLinus Torvalds extern void sigqueue_free(struct sigqueue *);
2177ac5c2153SOleg Nesterov extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
21789ac95f2fSOleg Nesterov extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
21791da177e4SLinus Torvalds 
218051a7b448SAl Viro static inline void restore_saved_sigmask(void)
218151a7b448SAl Viro {
218251a7b448SAl Viro 	if (test_and_clear_restore_sigmask())
218377097ae5SAl Viro 		__set_current_blocked(&current->saved_sigmask);
218451a7b448SAl Viro }
218551a7b448SAl Viro 
2186b7f9a11aSAl Viro static inline sigset_t *sigmask_to_save(void)
2187b7f9a11aSAl Viro {
2188b7f9a11aSAl Viro 	sigset_t *res = &current->blocked;
2189b7f9a11aSAl Viro 	if (unlikely(test_restore_sigmask()))
2190b7f9a11aSAl Viro 		res = &current->saved_sigmask;
2191b7f9a11aSAl Viro 	return res;
2192b7f9a11aSAl Viro }
2193b7f9a11aSAl Viro 
21949ec52099SCedric Le Goater static inline int kill_cad_pid(int sig, int priv)
21959ec52099SCedric Le Goater {
21969ec52099SCedric Le Goater 	return kill_pid(cad_pid, sig, priv);
21979ec52099SCedric Le Goater }
21989ec52099SCedric Le Goater 
21991da177e4SLinus Torvalds /* These can be the second arg to send_sig_info/send_group_sig_info.  */
22001da177e4SLinus Torvalds #define SEND_SIG_NOINFO ((struct siginfo *) 0)
22011da177e4SLinus Torvalds #define SEND_SIG_PRIV	((struct siginfo *) 1)
22021da177e4SLinus Torvalds #define SEND_SIG_FORCED	((struct siginfo *) 2)
22031da177e4SLinus Torvalds 
22042a855dd0SSebastian Andrzej Siewior /*
22052a855dd0SSebastian Andrzej Siewior  * True if we are on the alternate signal stack.
22062a855dd0SSebastian Andrzej Siewior  */
22071da177e4SLinus Torvalds static inline int on_sig_stack(unsigned long sp)
22081da177e4SLinus Torvalds {
22092a855dd0SSebastian Andrzej Siewior #ifdef CONFIG_STACK_GROWSUP
22102a855dd0SSebastian Andrzej Siewior 	return sp >= current->sas_ss_sp &&
22112a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp < current->sas_ss_size;
22122a855dd0SSebastian Andrzej Siewior #else
22132a855dd0SSebastian Andrzej Siewior 	return sp > current->sas_ss_sp &&
22142a855dd0SSebastian Andrzej Siewior 		sp - current->sas_ss_sp <= current->sas_ss_size;
22152a855dd0SSebastian Andrzej Siewior #endif
22161da177e4SLinus Torvalds }
22171da177e4SLinus Torvalds 
22181da177e4SLinus Torvalds static inline int sas_ss_flags(unsigned long sp)
22191da177e4SLinus Torvalds {
22201da177e4SLinus Torvalds 	return (current->sas_ss_size == 0 ? SS_DISABLE
22211da177e4SLinus Torvalds 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
22221da177e4SLinus Torvalds }
22231da177e4SLinus Torvalds 
22245a1b98d3SAl Viro static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
22255a1b98d3SAl Viro {
22265a1b98d3SAl Viro 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
22275a1b98d3SAl Viro #ifdef CONFIG_STACK_GROWSUP
22285a1b98d3SAl Viro 		return current->sas_ss_sp;
22295a1b98d3SAl Viro #else
22305a1b98d3SAl Viro 		return current->sas_ss_sp + current->sas_ss_size;
22315a1b98d3SAl Viro #endif
22325a1b98d3SAl Viro 	return sp;
22335a1b98d3SAl Viro }
22345a1b98d3SAl Viro 
22351da177e4SLinus Torvalds /*
22361da177e4SLinus Torvalds  * Routines for handling mm_structs
22371da177e4SLinus Torvalds  */
22381da177e4SLinus Torvalds extern struct mm_struct * mm_alloc(void);
22391da177e4SLinus Torvalds 
22401da177e4SLinus Torvalds /* mmdrop drops the mm and the page tables */
2241b3c97528SHarvey Harrison extern void __mmdrop(struct mm_struct *);
22421da177e4SLinus Torvalds static inline void mmdrop(struct mm_struct * mm)
22431da177e4SLinus Torvalds {
22446fb43d7bSIngo Molnar 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
22451da177e4SLinus Torvalds 		__mmdrop(mm);
22461da177e4SLinus Torvalds }
22471da177e4SLinus Torvalds 
22481da177e4SLinus Torvalds /* mmput gets rid of the mappings and all user-space */
22491da177e4SLinus Torvalds extern void mmput(struct mm_struct *);
22501da177e4SLinus Torvalds /* Grab a reference to a task's mm, if it is not already going away */
22511da177e4SLinus Torvalds extern struct mm_struct *get_task_mm(struct task_struct *task);
22528cdb878dSChristopher Yeoh /*
22538cdb878dSChristopher Yeoh  * Grab a reference to a task's mm, if it is not already going away
22548cdb878dSChristopher Yeoh  * and ptrace_may_access with the mode parameter passed to it
22558cdb878dSChristopher Yeoh  * succeeds.
22568cdb878dSChristopher Yeoh  */
22578cdb878dSChristopher Yeoh extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
22581da177e4SLinus Torvalds /* Remove the current task's stale references to the old mm_struct */
22591da177e4SLinus Torvalds extern void mm_release(struct task_struct *, struct mm_struct *);
2260402b0862SCarsten Otte /* Allocate a new mm structure and copy contents from tsk->mm */
2261402b0862SCarsten Otte extern struct mm_struct *dup_mm(struct task_struct *tsk);
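/*
 * Usage sketch (assumption): get_task_mm() returns NULL for kernel
 * threads or when the mm is already going away; the reference it takes
 * must be dropped with mmput().  example_task_has_mm() is hypothetical.
 *
 *	static bool example_task_has_mm(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *
 *		if (!mm)
 *			return false;
 *		mmput(mm);	// drop the reference taken by get_task_mm()
 *		return true;
 *	}
 */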
22621da177e4SLinus Torvalds 
22636f2c55b8SAlexey Dobriyan extern int copy_thread(unsigned long, unsigned long, unsigned long,
2264afa86fc4SAl Viro 			struct task_struct *);
22651da177e4SLinus Torvalds extern void flush_thread(void);
22661da177e4SLinus Torvalds extern void exit_thread(void);
22671da177e4SLinus Torvalds 
22681da177e4SLinus Torvalds extern void exit_files(struct task_struct *);
2269a7e5328aSOleg Nesterov extern void __cleanup_sighand(struct sighand_struct *);
2270cbaffba1SOleg Nesterov 
22711da177e4SLinus Torvalds extern void exit_itimers(struct signal_struct *);
2272cbaffba1SOleg Nesterov extern void flush_itimer_signals(void);
22731da177e4SLinus Torvalds 
22749402c95fSJoe Perches extern void do_group_exit(int);
22751da177e4SLinus Torvalds 
22761da177e4SLinus Torvalds extern int allow_signal(int);
22771da177e4SLinus Torvalds extern int disallow_signal(int);
22781da177e4SLinus Torvalds 
2279d7627467SDavid Howells extern int do_execve(const char *,
2280d7627467SDavid Howells 		     const char __user * const __user *,
2281da3d4c5fSAl Viro 		     const char __user * const __user *);
2282e80d6661SAl Viro extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
228336c8b586SIngo Molnar struct task_struct *fork_idle(int);
22842aa3a7f8SAl Viro extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
22851da177e4SLinus Torvalds 
22861da177e4SLinus Torvalds extern void set_task_comm(struct task_struct *tsk, char *from);
228759714d65SAndrew Morton extern char *get_task_comm(char *to, struct task_struct *tsk);
22881da177e4SLinus Torvalds 
22891da177e4SLinus Torvalds #ifdef CONFIG_SMP
2290317f3941SPeter Zijlstra void scheduler_ipi(void);
229185ba2d86SRoland McGrath extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
22921da177e4SLinus Torvalds #else
2293184748ccSPeter Zijlstra static inline void scheduler_ipi(void) { }
229485ba2d86SRoland McGrath static inline unsigned long wait_task_inactive(struct task_struct *p,
229585ba2d86SRoland McGrath 					       long match_state)
229685ba2d86SRoland McGrath {
229785ba2d86SRoland McGrath 	return 1;
229885ba2d86SRoland McGrath }
22991da177e4SLinus Torvalds #endif
23001da177e4SLinus Torvalds 
230105725f7eSJiri Pirko #define next_task(p) \
230205725f7eSJiri Pirko 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
23031da177e4SLinus Torvalds 
23041da177e4SLinus Torvalds #define for_each_process(p) \
23051da177e4SLinus Torvalds 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
23061da177e4SLinus Torvalds 
23075bb459bbSOleg Nesterov extern bool current_is_single_threaded(void);
2308d84f4f99SDavid Howells 
23091da177e4SLinus Torvalds /*
23101da177e4SLinus Torvalds  * Careful: do_each_thread/while_each_thread is a double loop so
23111da177e4SLinus Torvalds  *          'break' will not work as expected - use goto instead.
23121da177e4SLinus Torvalds  */
23131da177e4SLinus Torvalds #define do_each_thread(g, t) \
23141da177e4SLinus Torvalds 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
23151da177e4SLinus Torvalds 
23161da177e4SLinus Torvalds #define while_each_thread(g, t) \
23171da177e4SLinus Torvalds 	while ((t = next_thread(t)) != g)
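/*
 * Usage sketch (assumption): the walk must be protected by
 * read_lock(&tasklist_lock) (or RCU), and an early exit needs a goto
 * because of the double loop.  example_match() is hypothetical.
 *
 *	struct task_struct *g, *t;
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (example_match(t))
 *			goto found;
 *	} while_each_thread(g, t);
 *	t = NULL;
 * found:
 *	read_unlock(&tasklist_lock);
 */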
23181da177e4SLinus Torvalds 
23197e49827cSOleg Nesterov static inline int get_nr_threads(struct task_struct *tsk)
23207e49827cSOleg Nesterov {
2321b3ac022cSOleg Nesterov 	return tsk->signal->nr_threads;
23227e49827cSOleg Nesterov }
23237e49827cSOleg Nesterov 
2324087806b1SOleg Nesterov static inline bool thread_group_leader(struct task_struct *p)
2325087806b1SOleg Nesterov {
2326087806b1SOleg Nesterov 	return p->exit_signal >= 0;
2327087806b1SOleg Nesterov }
23281da177e4SLinus Torvalds 
23290804ef4bSEric W. Biederman /* Due to the insanities of de_thread it is possible for a process
23300804ef4bSEric W. Biederman  * to have the pid of the thread group leader without actually being
23310804ef4bSEric W. Biederman  * the thread group leader.  For iteration through the pids in proc
23320804ef4bSEric W. Biederman  * all we care about is that we have a task with the appropriate
23330804ef4bSEric W. Biederman  * pid, we don't actually care if we have the right task.
23340804ef4bSEric W. Biederman  */
2335e868171aSAlexey Dobriyan static inline int has_group_leader_pid(struct task_struct *p)
23360804ef4bSEric W. Biederman {
23370804ef4bSEric W. Biederman 	return p->pid == p->tgid;
23380804ef4bSEric W. Biederman }
23390804ef4bSEric W. Biederman 
2340bac0abd6SPavel Emelyanov static inline
2341bac0abd6SPavel Emelyanov int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2342bac0abd6SPavel Emelyanov {
2343bac0abd6SPavel Emelyanov 	return p1->tgid == p2->tgid;
2344bac0abd6SPavel Emelyanov }
2345bac0abd6SPavel Emelyanov 
234636c8b586SIngo Molnar static inline struct task_struct *next_thread(const struct task_struct *p)
234747e65328SOleg Nesterov {
234805725f7eSJiri Pirko 	return list_entry_rcu(p->thread_group.next,
234936c8b586SIngo Molnar 			      struct task_struct, thread_group);
235047e65328SOleg Nesterov }
235147e65328SOleg Nesterov 
2352e868171aSAlexey Dobriyan static inline int thread_group_empty(struct task_struct *p)
23531da177e4SLinus Torvalds {
235447e65328SOleg Nesterov 	return list_empty(&p->thread_group);
23551da177e4SLinus Torvalds }
23561da177e4SLinus Torvalds 
23571da177e4SLinus Torvalds #define delay_group_leader(p) \
23581da177e4SLinus Torvalds 		(thread_group_leader(p) && !thread_group_empty(p))
23591da177e4SLinus Torvalds 
23601da177e4SLinus Torvalds /*
2361260ea101SEric W. Biederman  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
236222e2c507SJens Axboe  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2363ddbcc7e8SPaul Menage  * pins the final release of task.io_context.  Also protects ->cpuset and
2364d68b46feSOleg Nesterov  * ->cgroup.subsys[]. And ->vfork_done.
23651da177e4SLinus Torvalds  *
23661da177e4SLinus Torvalds  * Nests both inside and outside of read_lock(&tasklist_lock).
23671da177e4SLinus Torvalds  * It must not be nested with write_lock_irq(&tasklist_lock),
23681da177e4SLinus Torvalds  * neither inside nor outside.
23691da177e4SLinus Torvalds  */
23701da177e4SLinus Torvalds static inline void task_lock(struct task_struct *p)
23711da177e4SLinus Torvalds {
23721da177e4SLinus Torvalds 	spin_lock(&p->alloc_lock);
23731da177e4SLinus Torvalds }
23741da177e4SLinus Torvalds 
23751da177e4SLinus Torvalds static inline void task_unlock(struct task_struct *p)
23761da177e4SLinus Torvalds {
23771da177e4SLinus Torvalds 	spin_unlock(&p->alloc_lock);
23781da177e4SLinus Torvalds }
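/*
 * Usage sketch (assumption): take task_lock() around short accesses to
 * the fields listed above, e.g. reading ->comm consistently.
 * example_copy_comm() is a hypothetical helper; get_task_comm() is the
 * real interface for this particular case.
 *
 *	static void example_copy_comm(struct task_struct *p, char *buf)
 *	{
 *		task_lock(p);
 *		strncpy(buf, p->comm, TASK_COMM_LEN);
 *		task_unlock(p);
 *	}
 */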
23791da177e4SLinus Torvalds 
2380b8ed374eSNamhyung Kim extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2381f63ee72eSOleg Nesterov 							unsigned long *flags);
2382f63ee72eSOleg Nesterov 
23839388dc30SAnton Vorontsov static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
23849388dc30SAnton Vorontsov 						       unsigned long *flags)
23859388dc30SAnton Vorontsov {
23869388dc30SAnton Vorontsov 	struct sighand_struct *ret;
23879388dc30SAnton Vorontsov 
23889388dc30SAnton Vorontsov 	ret = __lock_task_sighand(tsk, flags);
23899388dc30SAnton Vorontsov 	(void)__cond_lock(&tsk->sighand->siglock, ret);
23909388dc30SAnton Vorontsov 	return ret;
23919388dc30SAnton Vorontsov }
2392b8ed374eSNamhyung Kim 
2393f63ee72eSOleg Nesterov static inline void unlock_task_sighand(struct task_struct *tsk,
2394f63ee72eSOleg Nesterov 						unsigned long *flags)
2395f63ee72eSOleg Nesterov {
2396f63ee72eSOleg Nesterov 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2397f63ee72eSOleg Nesterov }
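/*
 * Usage sketch (assumption): lock_task_sighand() returns NULL when the
 * task is already being released, so the result must be checked before
 * touching anything protected by siglock.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		// ->signal and ->sighand are stable here
 *		unlock_task_sighand(p, &flags);
 *	}
 */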
2398f63ee72eSOleg Nesterov 
23994714d1d3SBen Blum #ifdef CONFIG_CGROUPS
2400257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk)
24014714d1d3SBen Blum {
2402257058aeSTejun Heo 	down_read(&tsk->signal->group_rwsem);
24034714d1d3SBen Blum }
2404257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk)
24054714d1d3SBen Blum {
2406257058aeSTejun Heo 	up_read(&tsk->signal->group_rwsem);
24074714d1d3SBen Blum }
240877e4ef99STejun Heo 
240977e4ef99STejun Heo /**
241077e4ef99STejun Heo  * threadgroup_lock - lock threadgroup
241177e4ef99STejun Heo  * @tsk: member task of the threadgroup to lock
241277e4ef99STejun Heo  *
241377e4ef99STejun Heo  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
241477e4ef99STejun Heo  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
241577e4ef99STejun Heo  * perform exec.  This is useful for cases where the threadgroup needs to
241677e4ef99STejun Heo  * stay stable across blockable operations.
241777e4ef99STejun Heo  *
241877e4ef99STejun Heo  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
241977e4ef99STejun Heo  * synchronization.  While held, no new task will be added to threadgroup
242077e4ef99STejun Heo  * and no existing live task will have its PF_EXITING set.
242177e4ef99STejun Heo  *
242277e4ef99STejun Heo  * During exec, a task goes and puts its thread group through unusual
242377e4ef99STejun Heo  * changes.  After de-threading, exclusive access is assumed to resources
242477e4ef99STejun Heo  * which are usually shared by tasks in the same group - e.g. sighand may
242577e4ef99STejun Heo  * be replaced with a new one.  Also, the exec'ing task takes over group
242677e4ef99STejun Heo  * leader role including its pid.  Exclude these changes while locked by
242777e4ef99STejun Heo  * leader role including its pid.  These changes are excluded while locked
242877e4ef99STejun Heo  * by also grabbing cred_guard_mutex, which is used to synchronize the exec path.
2429257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk)
24304714d1d3SBen Blum {
243177e4ef99STejun Heo 	/*
243277e4ef99STejun Heo 	 * exec uses exit for de-threading, nesting group_rwsem inside
243377e4ef99STejun Heo 	 * cred_guard_mutex. Grab cred_guard_mutex first.
243477e4ef99STejun Heo 	 */
243577e4ef99STejun Heo 	mutex_lock(&tsk->signal->cred_guard_mutex);
2436257058aeSTejun Heo 	down_write(&tsk->signal->group_rwsem);
24374714d1d3SBen Blum }
243877e4ef99STejun Heo 
243977e4ef99STejun Heo /**
244077e4ef99STejun Heo  * threadgroup_unlock - unlock threadgroup
244177e4ef99STejun Heo  * @tsk: member task of the threadgroup to unlock
244277e4ef99STejun Heo  *
244377e4ef99STejun Heo  * Reverse threadgroup_lock().
244477e4ef99STejun Heo  */
2445257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk)
24464714d1d3SBen Blum {
2447257058aeSTejun Heo 	up_write(&tsk->signal->group_rwsem);
244877e4ef99STejun Heo 	mutex_unlock(&tsk->signal->cred_guard_mutex);
24494714d1d3SBen Blum }
24504714d1d3SBen Blum #else
2451257058aeSTejun Heo static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2452257058aeSTejun Heo static inline void threadgroup_change_end(struct task_struct *tsk) {}
2453257058aeSTejun Heo static inline void threadgroup_lock(struct task_struct *tsk) {}
2454257058aeSTejun Heo static inline void threadgroup_unlock(struct task_struct *tsk) {}
24554714d1d3SBen Blum #endif
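/*
 * Usage sketch (assumption): code that needs a stable view of a whole
 * threadgroup (e.g. cgroup migration of every thread) brackets the
 * operation on the group leader.  example_visit_threads() is hypothetical.
 *
 *	threadgroup_lock(leader);
 *	// no thread in the group can fork, exit or exec here
 *	example_visit_threads(leader);
 *	threadgroup_unlock(leader);
 */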
24564714d1d3SBen Blum 
2457f037360fSAl Viro #ifndef __HAVE_THREAD_FUNCTIONS
2458f037360fSAl Viro 
2459f7e4217bSRoman Zippel #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2460f7e4217bSRoman Zippel #define task_stack_page(task)	((task)->stack)
2461a1261f54SAl Viro 
246210ebffdeSAl Viro static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
246310ebffdeSAl Viro {
246410ebffdeSAl Viro 	*task_thread_info(p) = *task_thread_info(org);
246510ebffdeSAl Viro 	task_thread_info(p)->task = p;
246610ebffdeSAl Viro }
246710ebffdeSAl Viro 
246810ebffdeSAl Viro static inline unsigned long *end_of_stack(struct task_struct *p)
246910ebffdeSAl Viro {
2470f7e4217bSRoman Zippel 	return (unsigned long *)(task_thread_info(p) + 1);
247110ebffdeSAl Viro }
247210ebffdeSAl Viro 
2473f037360fSAl Viro #endif
2474f037360fSAl Viro 
24758b05c7e6SFUJITA Tomonori static inline int object_is_on_stack(void *obj)
24768b05c7e6SFUJITA Tomonori {
24778b05c7e6SFUJITA Tomonori 	void *stack = task_stack_page(current);
24788b05c7e6SFUJITA Tomonori 
24798b05c7e6SFUJITA Tomonori 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
24808b05c7e6SFUJITA Tomonori }
24818b05c7e6SFUJITA Tomonori 
24828c9843e5SBenjamin Herrenschmidt extern void thread_info_cache_init(void);
24838c9843e5SBenjamin Herrenschmidt 
24847c9f8861SEric Sandeen #ifdef CONFIG_DEBUG_STACK_USAGE
24857c9f8861SEric Sandeen static inline unsigned long stack_not_used(struct task_struct *p)
24867c9f8861SEric Sandeen {
24877c9f8861SEric Sandeen 	unsigned long *n = end_of_stack(p);
24887c9f8861SEric Sandeen 
24897c9f8861SEric Sandeen 	do { 	/* Skip over canary */
24907c9f8861SEric Sandeen 		n++;
24917c9f8861SEric Sandeen 	} while (!*n);
24927c9f8861SEric Sandeen 
24937c9f8861SEric Sandeen 	return (unsigned long)n - (unsigned long)end_of_stack(p);
24947c9f8861SEric Sandeen }
24957c9f8861SEric Sandeen #endif
24967c9f8861SEric Sandeen 
24971da177e4SLinus Torvalds /* set thread flags in other task's structures
24981da177e4SLinus Torvalds  * - see asm/thread_info.h for TIF_xxxx flags available
24991da177e4SLinus Torvalds  */
25001da177e4SLinus Torvalds static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
25011da177e4SLinus Torvalds {
2502a1261f54SAl Viro 	set_ti_thread_flag(task_thread_info(tsk), flag);
25031da177e4SLinus Torvalds }
25041da177e4SLinus Torvalds 
25051da177e4SLinus Torvalds static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25061da177e4SLinus Torvalds {
2507a1261f54SAl Viro 	clear_ti_thread_flag(task_thread_info(tsk), flag);
25081da177e4SLinus Torvalds }
25091da177e4SLinus Torvalds 
25101da177e4SLinus Torvalds static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
25111da177e4SLinus Torvalds {
2512a1261f54SAl Viro 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
25131da177e4SLinus Torvalds }
25141da177e4SLinus Torvalds 
25151da177e4SLinus Torvalds static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
25161da177e4SLinus Torvalds {
2517a1261f54SAl Viro 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
25181da177e4SLinus Torvalds }
25191da177e4SLinus Torvalds 
25201da177e4SLinus Torvalds static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
25211da177e4SLinus Torvalds {
2522a1261f54SAl Viro 	return test_ti_thread_flag(task_thread_info(tsk), flag);
25231da177e4SLinus Torvalds }
25241da177e4SLinus Torvalds 
25251da177e4SLinus Torvalds static inline void set_tsk_need_resched(struct task_struct *tsk)
25261da177e4SLinus Torvalds {
25271da177e4SLinus Torvalds 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25281da177e4SLinus Torvalds }
25291da177e4SLinus Torvalds 
25301da177e4SLinus Torvalds static inline void clear_tsk_need_resched(struct task_struct *tsk)
25311da177e4SLinus Torvalds {
25321da177e4SLinus Torvalds 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
25331da177e4SLinus Torvalds }
25341da177e4SLinus Torvalds 
25358ae121acSGregory Haskins static inline int test_tsk_need_resched(struct task_struct *tsk)
25368ae121acSGregory Haskins {
25378ae121acSGregory Haskins 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
25388ae121acSGregory Haskins }
25398ae121acSGregory Haskins 
2540690cc3ffSEric W. Biederman static inline int restart_syscall(void)
2541690cc3ffSEric W. Biederman {
2542690cc3ffSEric W. Biederman 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2543690cc3ffSEric W. Biederman 	return -ERESTARTNOINTR;
2544690cc3ffSEric W. Biederman }
2545690cc3ffSEric W. Biederman 
25461da177e4SLinus Torvalds static inline int signal_pending(struct task_struct *p)
25471da177e4SLinus Torvalds {
25481da177e4SLinus Torvalds 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
25491da177e4SLinus Torvalds }
25501da177e4SLinus Torvalds 
2551d9588725SRoland McGrath static inline int __fatal_signal_pending(struct task_struct *p)
2552d9588725SRoland McGrath {
2553d9588725SRoland McGrath 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2554d9588725SRoland McGrath }
2555f776d12dSMatthew Wilcox 
2556f776d12dSMatthew Wilcox static inline int fatal_signal_pending(struct task_struct *p)
2557f776d12dSMatthew Wilcox {
2558f776d12dSMatthew Wilcox 	return signal_pending(p) && __fatal_signal_pending(p);
2559f776d12dSMatthew Wilcox }
2560f776d12dSMatthew Wilcox 
256116882c1eSOleg Nesterov static inline int signal_pending_state(long state, struct task_struct *p)
256216882c1eSOleg Nesterov {
256316882c1eSOleg Nesterov 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
256416882c1eSOleg Nesterov 		return 0;
256516882c1eSOleg Nesterov 	if (!signal_pending(p))
256616882c1eSOleg Nesterov 		return 0;
256716882c1eSOleg Nesterov 
256816882c1eSOleg Nesterov 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
256916882c1eSOleg Nesterov }
257016882c1eSOleg Nesterov 
25711da177e4SLinus Torvalds static inline int need_resched(void)
25721da177e4SLinus Torvalds {
25739404ef02SLinus Torvalds 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
25741da177e4SLinus Torvalds }
25751da177e4SLinus Torvalds 
25761da177e4SLinus Torvalds /*
25771da177e4SLinus Torvalds  * cond_resched() and cond_resched_lock(): latency reduction via
25781da177e4SLinus Torvalds  * value indicates whether a reschedule was actually done.
25791da177e4SLinus Torvalds  * value indicates whether a reschedule was done in fact.
25801da177e4SLinus Torvalds  * cond_resched_lock() will drop the spinlock before scheduling,
25811da177e4SLinus Torvalds  * cond_resched_softirq() will enable bottom halves (BHs) before scheduling.
25821da177e4SLinus Torvalds  */
2583c3921ab7SLinus Torvalds extern int _cond_resched(void);
25846f80bd98SFrederic Weisbecker 
2585613afbf8SFrederic Weisbecker #define cond_resched() ({			\
2586613afbf8SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, 0);	\
2587613afbf8SFrederic Weisbecker 	_cond_resched();			\
2588613afbf8SFrederic Weisbecker })
25896f80bd98SFrederic Weisbecker 
2590613afbf8SFrederic Weisbecker extern int __cond_resched_lock(spinlock_t *lock);
2591613afbf8SFrederic Weisbecker 
2592bdd4e85dSFrederic Weisbecker #ifdef CONFIG_PREEMPT_COUNT
2593716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
259402b67cc3SHerbert Xu #else
2595716a4234SFrederic Weisbecker #define PREEMPT_LOCK_OFFSET	0
259602b67cc3SHerbert Xu #endif
2597716a4234SFrederic Weisbecker 
2598613afbf8SFrederic Weisbecker #define cond_resched_lock(lock) ({				\
2599716a4234SFrederic Weisbecker 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2600613afbf8SFrederic Weisbecker 	__cond_resched_lock(lock);				\
2601613afbf8SFrederic Weisbecker })
2602613afbf8SFrederic Weisbecker 
2603613afbf8SFrederic Weisbecker extern int __cond_resched_softirq(void);
2604613afbf8SFrederic Weisbecker 
2605613afbf8SFrederic Weisbecker #define cond_resched_softirq() ({					\
260675e1056fSVenkatesh Pallipadi 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2607613afbf8SFrederic Weisbecker 	__cond_resched_softirq();					\
2608613afbf8SFrederic Weisbecker })
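/*
 * Usage sketch (assumption): long-running loops in process context call
 * cond_resched() periodically so other tasks get a chance to run;
 * cond_resched_lock() does the same while temporarily dropping the given
 * spinlock.  example_process() is a hypothetical per-item function.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		example_process(i);
 *		cond_resched();
 *	}
 */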
26091da177e4SLinus Torvalds 
26101da177e4SLinus Torvalds /*
26111da177e4SLinus Torvalds  * Does a critical section need to be broken due to another
261295c354feSNick Piggin  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
261395c354feSNick Piggin  * but reflects a general need for low latency.)
26141da177e4SLinus Torvalds  */
261595c354feSNick Piggin static inline int spin_needbreak(spinlock_t *lock)
26161da177e4SLinus Torvalds {
261795c354feSNick Piggin #ifdef CONFIG_PREEMPT
261895c354feSNick Piggin 	return spin_is_contended(lock);
261995c354feSNick Piggin #else
26201da177e4SLinus Torvalds 	return 0;
262195c354feSNick Piggin #endif
26221da177e4SLinus Torvalds }
26231da177e4SLinus Torvalds 
26247bb44adeSRoland McGrath /*
2625*ee761f62SThomas Gleixner  * Idle thread specific functions to determine the need_resched
2626*ee761f62SThomas Gleixner  * polling state. We have two versions, one based on TS_POLLING in
2627*ee761f62SThomas Gleixner  * thread_info.status and one based on TIF_POLLING_NRFLAG in
2628*ee761f62SThomas Gleixner  * thread_info.flags
2629*ee761f62SThomas Gleixner  */
2630*ee761f62SThomas Gleixner #ifdef TS_POLLING
2631*ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p)
2632*ee761f62SThomas Gleixner {
2633*ee761f62SThomas Gleixner 	return task_thread_info(p)->status & TS_POLLING;
2634*ee761f62SThomas Gleixner }
2635*ee761f62SThomas Gleixner #elif defined(TIF_POLLING_NRFLAG)
2636*ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p)
2637*ee761f62SThomas Gleixner {
2638*ee761f62SThomas Gleixner 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2639*ee761f62SThomas Gleixner }
2640*ee761f62SThomas Gleixner #else
2641*ee761f62SThomas Gleixner static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2642*ee761f62SThomas Gleixner #endif
2643*ee761f62SThomas Gleixner 
2644*ee761f62SThomas Gleixner /*
2645f06febc9SFrank Mayhar  * Thread group CPU time accounting.
2646f06febc9SFrank Mayhar  */
26474cd4c1b4SPeter Zijlstra void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
26484da94d49SPeter Zijlstra void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2649f06febc9SFrank Mayhar 
2650f06febc9SFrank Mayhar static inline void thread_group_cputime_init(struct signal_struct *sig)
2651f06febc9SFrank Mayhar {
2652ee30a7b2SThomas Gleixner 	raw_spin_lock_init(&sig->cputimer.lock);
2653f06febc9SFrank Mayhar }
2654f06febc9SFrank Mayhar 
2655f06febc9SFrank Mayhar /*
26567bb44adeSRoland McGrath  * Reevaluate whether the task has signals pending delivery.
26577bb44adeSRoland McGrath  * Wake the task if so.
26587bb44adeSRoland McGrath  * This is required every time the blocked sigset_t changes.
26597bb44adeSRoland McGrath  * Callers must hold sighand->siglock.
26607bb44adeSRoland McGrath  */
26617bb44adeSRoland McGrath extern void recalc_sigpending_and_wake(struct task_struct *t);
26621da177e4SLinus Torvalds extern void recalc_sigpending(void);
26631da177e4SLinus Torvalds 
2664910ffdb1SOleg Nesterov extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2665910ffdb1SOleg Nesterov 
2666910ffdb1SOleg Nesterov static inline void signal_wake_up(struct task_struct *t, bool resume)
2667910ffdb1SOleg Nesterov {
2668910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2669910ffdb1SOleg Nesterov }
2670910ffdb1SOleg Nesterov static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2671910ffdb1SOleg Nesterov {
2672910ffdb1SOleg Nesterov 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2673910ffdb1SOleg Nesterov }
26741da177e4SLinus Torvalds 
26751da177e4SLinus Torvalds /*
26761da177e4SLinus Torvalds  * Wrappers for p->thread_info->cpu access. No-op on UP.
26771da177e4SLinus Torvalds  */
26781da177e4SLinus Torvalds #ifdef CONFIG_SMP
26791da177e4SLinus Torvalds 
26801da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
26811da177e4SLinus Torvalds {
2682a1261f54SAl Viro 	return task_thread_info(p)->cpu;
26831da177e4SLinus Torvalds }
26841da177e4SLinus Torvalds 
2685c65cc870SIngo Molnar extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
26861da177e4SLinus Torvalds 
26871da177e4SLinus Torvalds #else
26881da177e4SLinus Torvalds 
26891da177e4SLinus Torvalds static inline unsigned int task_cpu(const struct task_struct *p)
26901da177e4SLinus Torvalds {
26911da177e4SLinus Torvalds 	return 0;
26921da177e4SLinus Torvalds }
26931da177e4SLinus Torvalds 
26941da177e4SLinus Torvalds static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
26951da177e4SLinus Torvalds {
26961da177e4SLinus Torvalds }
26971da177e4SLinus Torvalds 
26981da177e4SLinus Torvalds #endif /* CONFIG_SMP */
26991da177e4SLinus Torvalds 
270096f874e2SRusty Russell extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
270196f874e2SRusty Russell extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
27025c45bf27SSiddha, Suresh B 
27037c941438SDhaval Giani #ifdef CONFIG_CGROUP_SCHED
27049b5b7751SSrivatsa Vaddagiri 
270507e06b01SYong Zhang extern struct task_group root_task_group;
27069b5b7751SSrivatsa Vaddagiri 
2707ec7dc8acSDhaval Giani extern struct task_group *sched_create_group(struct task_group *parent);
2708ace783b9SLi Zefan extern void sched_online_group(struct task_group *tg,
2709ace783b9SLi Zefan 			       struct task_group *parent);
27104cf86d77SIngo Molnar extern void sched_destroy_group(struct task_group *tg);
2711ace783b9SLi Zefan extern void sched_offline_group(struct task_group *tg);
27129b5b7751SSrivatsa Vaddagiri extern void sched_move_task(struct task_struct *tsk);
2713052f1dc7SPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
27144cf86d77SIngo Molnar extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
27155cb350baSDhaval Giani extern unsigned long sched_group_shares(struct task_group *tg);
2716052f1dc7SPeter Zijlstra #endif
2717052f1dc7SPeter Zijlstra #ifdef CONFIG_RT_GROUP_SCHED
27189f0c1e56SPeter Zijlstra extern int sched_group_set_rt_runtime(struct task_group *tg,
27199f0c1e56SPeter Zijlstra 				      long rt_runtime_us);
27209f0c1e56SPeter Zijlstra extern long sched_group_rt_runtime(struct task_group *tg);
2721d0b27fa7SPeter Zijlstra extern int sched_group_set_rt_period(struct task_group *tg,
2722d0b27fa7SPeter Zijlstra 				      long rt_period_us);
2723d0b27fa7SPeter Zijlstra extern long sched_group_rt_period(struct task_group *tg);
272454e99124SDhaval Giani extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2725052f1dc7SPeter Zijlstra #endif
27268323f26cSPeter Zijlstra #endif /* CONFIG_CGROUP_SCHED */
27279b5b7751SSrivatsa Vaddagiri 
272854e99124SDhaval Giani extern int task_can_switch_user(struct user_struct *up,
272954e99124SDhaval Giani 					struct task_struct *tsk);
273054e99124SDhaval Giani 
27314b98d11bSAlexey Dobriyan #ifdef CONFIG_TASK_XACCT
27324b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
27334b98d11bSAlexey Dobriyan {
2734940389b8SAndrea Righi 	tsk->ioac.rchar += amt;
27354b98d11bSAlexey Dobriyan }
27364b98d11bSAlexey Dobriyan 
27374b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
27384b98d11bSAlexey Dobriyan {
2739940389b8SAndrea Righi 	tsk->ioac.wchar += amt;
27404b98d11bSAlexey Dobriyan }
27414b98d11bSAlexey Dobriyan 
27424b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
27434b98d11bSAlexey Dobriyan {
2744940389b8SAndrea Righi 	tsk->ioac.syscr++;
27454b98d11bSAlexey Dobriyan }
27464b98d11bSAlexey Dobriyan 
27474b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
27484b98d11bSAlexey Dobriyan {
2749940389b8SAndrea Righi 	tsk->ioac.syscw++;
27504b98d11bSAlexey Dobriyan }
27514b98d11bSAlexey Dobriyan #else
27524b98d11bSAlexey Dobriyan static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
27534b98d11bSAlexey Dobriyan {
27544b98d11bSAlexey Dobriyan }
27554b98d11bSAlexey Dobriyan 
27564b98d11bSAlexey Dobriyan static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
27574b98d11bSAlexey Dobriyan {
27584b98d11bSAlexey Dobriyan }
27594b98d11bSAlexey Dobriyan 
27604b98d11bSAlexey Dobriyan static inline void inc_syscr(struct task_struct *tsk)
27614b98d11bSAlexey Dobriyan {
27624b98d11bSAlexey Dobriyan }
27634b98d11bSAlexey Dobriyan 
27644b98d11bSAlexey Dobriyan static inline void inc_syscw(struct task_struct *tsk)
27654b98d11bSAlexey Dobriyan {
27664b98d11bSAlexey Dobriyan }
27674b98d11bSAlexey Dobriyan #endif
27684b98d11bSAlexey Dobriyan 
276982455257SDave Hansen #ifndef TASK_SIZE_OF
277082455257SDave Hansen #define TASK_SIZE_OF(tsk)	TASK_SIZE
277182455257SDave Hansen #endif
277282455257SDave Hansen 
2773cf475ad2SBalbir Singh #ifdef CONFIG_MM_OWNER
2774cf475ad2SBalbir Singh extern void mm_update_next_owner(struct mm_struct *mm);
2775cf475ad2SBalbir Singh extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2776cf475ad2SBalbir Singh #else
2777cf475ad2SBalbir Singh static inline void mm_update_next_owner(struct mm_struct *mm)
2778cf475ad2SBalbir Singh {
2779cf475ad2SBalbir Singh }
2780cf475ad2SBalbir Singh 
2781cf475ad2SBalbir Singh static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2782cf475ad2SBalbir Singh {
2783cf475ad2SBalbir Singh }
2784cf475ad2SBalbir Singh #endif /* CONFIG_MM_OWNER */
2785cf475ad2SBalbir Singh 
27863e10e716SJiri Slaby static inline unsigned long task_rlimit(const struct task_struct *tsk,
27873e10e716SJiri Slaby 		unsigned int limit)
27883e10e716SJiri Slaby {
27893e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
27903e10e716SJiri Slaby }
27913e10e716SJiri Slaby 
27923e10e716SJiri Slaby static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
27933e10e716SJiri Slaby 		unsigned int limit)
27943e10e716SJiri Slaby {
27953e10e716SJiri Slaby 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
27963e10e716SJiri Slaby }
27973e10e716SJiri Slaby 
27983e10e716SJiri Slaby static inline unsigned long rlimit(unsigned int limit)
27993e10e716SJiri Slaby {
28003e10e716SJiri Slaby 	return task_rlimit(current, limit);
28013e10e716SJiri Slaby }
28023e10e716SJiri Slaby 
28033e10e716SJiri Slaby static inline unsigned long rlimit_max(unsigned int limit)
28043e10e716SJiri Slaby {
28053e10e716SJiri Slaby 	return task_rlimit_max(current, limit);
28063e10e716SJiri Slaby }
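/*
 * Usage sketch (assumption): these helpers read the current task's soft
 * and hard resource limits, e.g. to bound the number of open files.
 *
 *	unsigned long soft = rlimit(RLIMIT_NOFILE);
 *	unsigned long hard = rlimit_max(RLIMIT_NOFILE);
 */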
28073e10e716SJiri Slaby 
28081da177e4SLinus Torvalds #endif
2809