xref: /linux/include/linux/sched.h (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
3 
4 /*
5  * cloning flags:
6  */
7 #define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
8 #define CLONE_VM	0x00000100	/* set if VM shared between processes */
9 #define CLONE_FS	0x00000200	/* set if fs info shared between processes */
10 #define CLONE_FILES	0x00000400	/* set if open files shared between processes */
11 #define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
12 #define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
13 #define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
14 #define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
15 #define CLONE_THREAD	0x00010000	/* Same thread group? */
16 #define CLONE_NEWNS	0x00020000	/* New namespace group? */
17 #define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
18 #define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
19 #define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
20 #define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
21 #define CLONE_DETACHED		0x00400000	/* Unused, ignored */
22 #define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
23 #define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
24 /* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
25    and is now available for re-use. */
26 #define CLONE_NEWUTS		0x04000000	/* New utsname group? */
27 #define CLONE_NEWIPC		0x08000000	/* New ipcs */
28 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
29 #define CLONE_NEWPID		0x20000000	/* New pid namespace */
30 #define CLONE_NEWNET		0x40000000	/* New network namespace */
31 #define CLONE_IO		0x80000000	/* Clone io context */
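
/*
 * Illustrative example (not part of this header): an NPTL-style thread
 * create combines roughly the following flags, assuming the caller has
 * already set up a TLS block and the parent/child tid locations:
 *
 *	flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *		CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *		CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
 */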
32 
33 /*
34  * Scheduling policies
35  */
36 #define SCHED_NORMAL		0
37 #define SCHED_FIFO		1
38 #define SCHED_RR		2
39 #define SCHED_BATCH		3
40 /* SCHED_ISO: reserved but not implemented yet */
41 #define SCHED_IDLE		5
42 /* Can be ORed in to make sure the process is reverted to SCHED_NORMAL on fork */
43 #define SCHED_RESET_ON_FORK     0x40000000
44 
45 #ifdef __KERNEL__
46 
47 struct sched_param {
48 	int sched_priority;
49 };
50 
51 #include <asm/param.h>	/* for HZ */
52 
53 #include <linux/capability.h>
54 #include <linux/threads.h>
55 #include <linux/kernel.h>
56 #include <linux/types.h>
57 #include <linux/timex.h>
58 #include <linux/jiffies.h>
59 #include <linux/rbtree.h>
60 #include <linux/thread_info.h>
61 #include <linux/cpumask.h>
62 #include <linux/errno.h>
63 #include <linux/nodemask.h>
64 #include <linux/mm_types.h>
65 
66 #include <asm/system.h>
67 #include <asm/page.h>
68 #include <asm/ptrace.h>
69 #include <asm/cputime.h>
70 
71 #include <linux/smp.h>
72 #include <linux/sem.h>
73 #include <linux/signal.h>
74 #include <linux/compiler.h>
75 #include <linux/completion.h>
76 #include <linux/pid.h>
77 #include <linux/percpu.h>
78 #include <linux/topology.h>
79 #include <linux/proportions.h>
80 #include <linux/seccomp.h>
81 #include <linux/rcupdate.h>
82 #include <linux/rculist.h>
83 #include <linux/rtmutex.h>
84 
85 #include <linux/time.h>
86 #include <linux/param.h>
87 #include <linux/resource.h>
88 #include <linux/timer.h>
89 #include <linux/hrtimer.h>
90 #include <linux/task_io_accounting.h>
91 #include <linux/latencytop.h>
92 #include <linux/cred.h>
93 #include <linux/llist.h>
94 
95 #include <asm/processor.h>
96 
97 struct exec_domain;
98 struct futex_pi_state;
99 struct robust_list_head;
100 struct bio_list;
101 struct fs_struct;
102 struct perf_event_context;
103 struct blk_plug;
104 
105 /*
106  * List of flags we want to share for kernel threads,
107  * if only because they are not used by them anyway.
108  */
109 #define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
110 
111 /*
112  * These are the constants used to fake the fixed-point load-average
113  * counting. Some notes:
114  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
115  *    a load-average precision of 10 bits integer + 11 bits fractional
116  *  - if you want to count load-averages more often, you need more
117  *    precision, or rounding will get you. With 2-second counting freq,
118  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
119  *    11 bit fractions.
120  */
121 extern unsigned long avenrun[];		/* Load averages */
122 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
123 
124 #define FSHIFT		11		/* nr of bits of precision */
125 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
126 #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
127 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
128 #define EXP_5		2014		/* 1/exp(5sec/5min) */
129 #define EXP_15		2037		/* 1/exp(5sec/15min) */
130 
131 #define CALC_LOAD(load,exp,n) \
132 	load *= exp; \
133 	load += n*(FIXED_1-exp); \
134 	load >>= FSHIFT;
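
/*
 * Sketch (not part of this header) of how the three global averages in
 * avenrun[] are advanced once per LOAD_FREQ interval; count_active_tasks()
 * is a stand-in for the sampled number of runnable plus uninterruptible
 * tasks:
 *
 *	unsigned long active = count_active_tasks() * FIXED_1;
 *	CALC_LOAD(avenrun[0], EXP_1, active);
 *	CALC_LOAD(avenrun[1], EXP_5, active);
 *	CALC_LOAD(avenrun[2], EXP_15, active);
 */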
135 
136 extern unsigned long total_forks;
137 extern int nr_threads;
138 DECLARE_PER_CPU(unsigned long, process_counts);
139 extern int nr_processes(void);
140 extern unsigned long nr_running(void);
141 extern unsigned long nr_uninterruptible(void);
142 extern unsigned long nr_iowait(void);
143 extern unsigned long nr_iowait_cpu(int cpu);
144 extern unsigned long this_cpu_load(void);
145 
146 
147 extern void calc_global_load(unsigned long ticks);
148 
149 extern unsigned long get_parent_ip(unsigned long addr);
150 
151 struct seq_file;
152 struct cfs_rq;
153 struct task_group;
154 #ifdef CONFIG_SCHED_DEBUG
155 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
156 extern void proc_sched_set_task(struct task_struct *p);
157 extern void
158 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
159 #else
160 static inline void
161 proc_sched_show_task(struct task_struct *p, struct seq_file *m)
162 {
163 }
164 static inline void proc_sched_set_task(struct task_struct *p)
165 {
166 }
167 static inline void
168 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
169 {
170 }
171 #endif
172 
173 /*
174  * Task state bitmask. NOTE! These bits are also
175  * encoded in fs/proc/array.c: get_task_state().
176  *
177  * We have two separate sets of flags: task->state
178  * is about runnability, while task->exit_state is
179  * about the task exiting. Confusing, but this way
180  * modifying one set can't modify the other one by
181  * mistake.
182  */
183 #define TASK_RUNNING		0
184 #define TASK_INTERRUPTIBLE	1
185 #define TASK_UNINTERRUPTIBLE	2
186 #define __TASK_STOPPED		4
187 #define __TASK_TRACED		8
188 /* in tsk->exit_state */
189 #define EXIT_ZOMBIE		16
190 #define EXIT_DEAD		32
191 /* in tsk->state again */
192 #define TASK_DEAD		64
193 #define TASK_WAKEKILL		128
194 #define TASK_WAKING		256
195 #define TASK_STATE_MAX		512
196 
197 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
198 
199 extern char ___assert_task_state[1 - 2*!!(
200 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
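
/*
 * The array above is a build-time assertion: if the length of
 * TASK_STATE_TO_CHAR_STR does not match the number of state bits, the
 * !! term evaluates to 1 and the array size becomes -1, which fails to
 * compile.
 */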
201 
202 /* Convenience macros for the sake of set_task_state */
203 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
204 #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
205 #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
206 
207 /* Convenience macros for the sake of wake_up */
208 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
209 #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
210 
211 /* get_task_state() */
212 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
213 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
214 				 __TASK_TRACED)
215 
216 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
217 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
218 #define task_is_dead(task)	((task)->exit_state != 0)
219 #define task_is_stopped_or_traced(task)	\
220 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
221 #define task_contributes_to_load(task)	\
222 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
223 				 (task->flags & PF_FROZEN) == 0)
224 
225 #define __set_task_state(tsk, state_value)		\
226 	do { (tsk)->state = (state_value); } while (0)
227 #define set_task_state(tsk, state_value)		\
228 	set_mb((tsk)->state, (state_value))
229 
230 /*
231  * set_current_state() includes a barrier so that the write of current->state
232  * is correctly serialised wrt the caller's subsequent test of whether to
233  * actually sleep:
234  *
235  *	set_current_state(TASK_UNINTERRUPTIBLE);
236  *	if (do_i_need_to_sleep())
237  *		schedule();
238  *
239  * If the caller does not need such serialisation then use __set_current_state()
240  */
241 #define __set_current_state(state_value)			\
242 	do { current->state = (state_value); } while (0)
243 #define set_current_state(state_value)		\
244 	set_mb(current->state, (state_value))
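
/*
 * Canonical wait-loop built on the above (illustrative; "condition"
 * stands in for the caller's wakeup test):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */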
245 
246 /* Task command name length */
247 #define TASK_COMM_LEN 16
248 
249 #include <linux/spinlock.h>
250 
251 /*
252  * This serializes "schedule()" and also protects
253  * the run-queue from deletions/modifications (but
254  * _adding_ to the beginning of the run-queue has
255  * a separate lock).
256  */
257 extern rwlock_t tasklist_lock;
258 extern spinlock_t mmlist_lock;
259 
260 struct task_struct;
261 
262 #ifdef CONFIG_PROVE_RCU
263 extern int lockdep_tasklist_lock_is_held(void);
264 #endif /* #ifdef CONFIG_PROVE_RCU */
265 
266 extern void sched_init(void);
267 extern void sched_init_smp(void);
268 extern asmlinkage void schedule_tail(struct task_struct *prev);
269 extern void init_idle(struct task_struct *idle, int cpu);
270 extern void init_idle_bootup_task(struct task_struct *idle);
271 
272 extern int runqueue_is_locked(int cpu);
273 
274 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
275 extern void select_nohz_load_balancer(int stop_tick);
276 extern void set_cpu_sd_state_idle(void);
277 extern int get_nohz_timer_target(void);
278 #else
279 static inline void select_nohz_load_balancer(int stop_tick) { }
280 static inline void set_cpu_sd_state_idle(void) { }
281 #endif
282 
283 /*
284  * Only dump TASK_* tasks. (0 for all tasks)
285  */
286 extern void show_state_filter(unsigned long state_filter);
287 
288 static inline void show_state(void)
289 {
290 	show_state_filter(0);
291 }
292 
293 extern void show_regs(struct pt_regs *);
294 
295 /*
296  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
297  * task), SP is the stack pointer of the first frame that should be shown in the back
298  * trace (or NULL if the entire call-chain of the task should be shown).
299  */
300 extern void show_stack(struct task_struct *task, unsigned long *sp);
301 
302 void io_schedule(void);
303 long io_schedule_timeout(long timeout);
304 
305 extern void cpu_init (void);
306 extern void trap_init(void);
307 extern void update_process_times(int user);
308 extern void scheduler_tick(void);
309 
310 extern void sched_show_task(struct task_struct *p);
311 
312 #ifdef CONFIG_LOCKUP_DETECTOR
313 extern void touch_softlockup_watchdog(void);
314 extern void touch_softlockup_watchdog_sync(void);
315 extern void touch_all_softlockup_watchdogs(void);
316 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
317 				  void __user *buffer,
318 				  size_t *lenp, loff_t *ppos);
319 extern unsigned int  softlockup_panic;
320 void lockup_detector_init(void);
321 #else
322 static inline void touch_softlockup_watchdog(void)
323 {
324 }
325 static inline void touch_softlockup_watchdog_sync(void)
326 {
327 }
328 static inline void touch_all_softlockup_watchdogs(void)
329 {
330 }
331 static inline void lockup_detector_init(void)
332 {
333 }
334 #endif
335 
336 #ifdef CONFIG_DETECT_HUNG_TASK
337 extern unsigned int  sysctl_hung_task_panic;
338 extern unsigned long sysctl_hung_task_check_count;
339 extern unsigned long sysctl_hung_task_timeout_secs;
340 extern unsigned long sysctl_hung_task_warnings;
341 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
342 					 void __user *buffer,
343 					 size_t *lenp, loff_t *ppos);
344 #else
345 /* Avoid need for ifdefs elsewhere in the code */
346 enum { sysctl_hung_task_timeout_secs = 0 };
347 #endif
348 
349 /* Attach to any functions which should be ignored in wchan output. */
350 #define __sched		__attribute__((__section__(".sched.text")))
351 
352 /* Linker adds these: start and end of __sched functions */
353 extern char __sched_text_start[], __sched_text_end[];
354 
355 /* Is this address in the __sched functions? */
356 extern int in_sched_functions(unsigned long addr);
357 
358 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
359 extern signed long schedule_timeout(signed long timeout);
360 extern signed long schedule_timeout_interruptible(signed long timeout);
361 extern signed long schedule_timeout_killable(signed long timeout);
362 extern signed long schedule_timeout_uninterruptible(signed long timeout);
363 asmlinkage void schedule(void);
364 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
365 
366 struct nsproxy;
367 struct user_namespace;
368 
369 /*
370  * Default maximum number of active map areas; this limits the number of vmas
371  * per mm struct. Users can override this number via sysctl, but there is a
372  * problem.
373  *
374  * When a program's coredump is generated in ELF format, a section is created
375  * per vma. In ELF, the number of sections is represented as an unsigned short,
376  * so it must stay below 65535 at coredump time. Because the kernel adds some
377  * informative sections to the image of the program while generating the
378  * coredump, we need some margin. The number of extra sections is currently
379  * 1-3 and depends on the arch, so we use 5 as a safe margin here.
380  */
381 #define MAPCOUNT_ELF_CORE_MARGIN	(5)
382 #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
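/* With USHRT_MAX == 65535, this yields a default of 65530 map areas. */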
383 
384 extern int sysctl_max_map_count;
385 
386 #include <linux/aio.h>
387 
388 #ifdef CONFIG_MMU
389 extern void arch_pick_mmap_layout(struct mm_struct *mm);
390 extern unsigned long
391 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
392 		       unsigned long, unsigned long);
393 extern unsigned long
394 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
395 			  unsigned long len, unsigned long pgoff,
396 			  unsigned long flags);
397 extern void arch_unmap_area(struct mm_struct *, unsigned long);
398 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
399 #else
400 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
401 #endif
402 
403 
404 extern void set_dumpable(struct mm_struct *mm, int value);
405 extern int get_dumpable(struct mm_struct *mm);
406 
407 /* mm flags */
408 /* dumpable bits */
409 #define MMF_DUMPABLE      0  /* core dump is permitted */
410 #define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
411 
412 #define MMF_DUMPABLE_BITS 2
413 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
414 
415 /* coredump filter bits */
416 #define MMF_DUMP_ANON_PRIVATE	2
417 #define MMF_DUMP_ANON_SHARED	3
418 #define MMF_DUMP_MAPPED_PRIVATE	4
419 #define MMF_DUMP_MAPPED_SHARED	5
420 #define MMF_DUMP_ELF_HEADERS	6
421 #define MMF_DUMP_HUGETLB_PRIVATE 7
422 #define MMF_DUMP_HUGETLB_SHARED  8
423 
424 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
425 #define MMF_DUMP_FILTER_BITS	7
426 #define MMF_DUMP_FILTER_MASK \
427 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
428 #define MMF_DUMP_FILTER_DEFAULT \
429 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
430 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
431 
432 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
433 # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
434 #else
435 # define MMF_DUMP_MASK_DEFAULT_ELF	0
436 #endif
437 					/* leave room for more dump flags */
438 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
439 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
440 
441 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
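
/*
 * Worked example (informative): with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
 * set, MMF_DUMP_FILTER_DEFAULT has bits 2, 3, 6 and 7 set; shifted down
 * by MMF_DUMP_FILTER_SHIFT, /proc/<pid>/coredump_filter reports this as
 * 0x33.  Without it, bits 2, 3 and 7 give 0x23.
 */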
442 
443 struct sighand_struct {
444 	atomic_t		count;
445 	struct k_sigaction	action[_NSIG];
446 	spinlock_t		siglock;
447 	wait_queue_head_t	signalfd_wqh;
448 };
449 
450 struct pacct_struct {
451 	int			ac_flag;
452 	long			ac_exitcode;
453 	unsigned long		ac_mem;
454 	cputime_t		ac_utime, ac_stime;
455 	unsigned long		ac_minflt, ac_majflt;
456 };
457 
458 struct cpu_itimer {
459 	cputime_t expires;
460 	cputime_t incr;
461 	u32 error;
462 	u32 incr_error;
463 };
464 
465 /**
466  * struct task_cputime - collected CPU time counts
467  * @utime:		time spent in user mode, in &cputime_t units
468  * @stime:		time spent in kernel mode, in &cputime_t units
469  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
470  *
471  * This structure groups together three kinds of CPU time that are
472  * tracked for threads and thread groups.  Most things considering
473  * CPU time want to group these counts together and treat all three
474  * of them in parallel.
475  */
476 struct task_cputime {
477 	cputime_t utime;
478 	cputime_t stime;
479 	unsigned long long sum_exec_runtime;
480 };
481 /* Alternate field names when used to cache expirations. */
482 #define prof_exp	stime
483 #define virt_exp	utime
484 #define sched_exp	sum_exec_runtime
485 
486 #define INIT_CPUTIME	\
487 	(struct task_cputime) {					\
488 		.utime = 0,					\
489 		.stime = 0,					\
490 		.sum_exec_runtime = 0,				\
491 	}
492 
493 /*
494  * Disable preemption until the scheduler is running.
495  * Reset by start_kernel()->sched_init()->init_idle().
496  *
497  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
498  * before the scheduler is active -- see should_resched().
499  */
500 #define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
501 
502 /**
503  * struct thread_group_cputimer - thread group interval timer counts
504  * @cputime:		thread group interval timers.
505  * @running:		non-zero when there are timers running and
506  * 			@cputime receives updates.
507  * @lock:		lock for fields in this struct.
508  *
509  * This structure contains the version of task_cputime, above, that is
510  * used for thread group CPU timer calculations.
511  */
512 struct thread_group_cputimer {
513 	struct task_cputime cputime;
514 	int running;
515 	raw_spinlock_t lock;
516 };
517 
518 #include <linux/rwsem.h>
519 struct autogroup;
520 
521 /*
522  * NOTE! "signal_struct" does not have its own
523  * locking, because a shared signal_struct always
524  * implies a shared sighand_struct, so locking
525  * sighand_struct is always a proper superset of
526  * the locking of signal_struct.
527  */
528 struct signal_struct {
529 	atomic_t		sigcnt;
530 	atomic_t		live;
531 	int			nr_threads;
532 
533 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
534 
535 	/* current thread group signal load-balancing target: */
536 	struct task_struct	*curr_target;
537 
538 	/* shared signal handling: */
539 	struct sigpending	shared_pending;
540 
541 	/* thread group exit support */
542 	int			group_exit_code;
543 	/* overloaded:
544 	 * - notify group_exit_task when ->count is equal to notify_count
545 	 * - everyone except group_exit_task is stopped during signal delivery
546 	 *   of fatal signals, group_exit_task processes the signal.
547 	 */
548 	int			notify_count;
549 	struct task_struct	*group_exit_task;
550 
551 	/* thread group stop support, overloads group_exit_code too */
552 	int			group_stop_count;
553 	unsigned int		flags; /* see SIGNAL_* flags below */
554 
555 	/* POSIX.1b Interval Timers */
556 	struct list_head posix_timers;
557 
558 	/* ITIMER_REAL timer for the process */
559 	struct hrtimer real_timer;
560 	struct pid *leader_pid;
561 	ktime_t it_real_incr;
562 
563 	/*
564 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
565 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
566 	 * values are defined to 0 and 1 respectively
567 	 */
568 	struct cpu_itimer it[2];
569 
570 	/*
571 	 * Thread group totals for process CPU timers.
572 	 * See thread_group_cputimer(), et al, for details.
573 	 */
574 	struct thread_group_cputimer cputimer;
575 
576 	/* Earliest-expiration cache. */
577 	struct task_cputime cputime_expires;
578 
579 	struct list_head cpu_timers[3];
580 
581 	struct pid *tty_old_pgrp;
582 
583 	/* boolean value for session group leader */
584 	int leader;
585 
586 	struct tty_struct *tty; /* NULL if no tty */
587 
588 #ifdef CONFIG_SCHED_AUTOGROUP
589 	struct autogroup *autogroup;
590 #endif
591 	/*
592 	 * Cumulative resource counters for dead threads in the group,
593 	 * and for reaped dead child processes forked by this group.
594 	 * Live threads maintain their own counters and add to these
595 	 * in __exit_signal, except for the group leader.
596 	 */
597 	cputime_t utime, stime, cutime, cstime;
598 	cputime_t gtime;
599 	cputime_t cgtime;
600 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
601 	cputime_t prev_utime, prev_stime;
602 #endif
603 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
604 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
605 	unsigned long inblock, oublock, cinblock, coublock;
606 	unsigned long maxrss, cmaxrss;
607 	struct task_io_accounting ioac;
608 
609 	/*
610 	 * Cumulative ns of scheduled CPU time for dead threads in the
611 	 * group, not including a zombie group leader. (This only differs
612 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
613 	 * other than jiffies.)
614 	 */
615 	unsigned long long sum_sched_runtime;
616 
617 	/*
618 	 * We don't bother to synchronize most readers of this at all,
619 	 * because there is no reader checking a limit that actually needs
620 	 * to get both rlim_cur and rlim_max atomically, and either one
621 	 * alone is a single word that can safely be read normally.
622 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
623 	 * protect this instead of the siglock, because they really
624 	 * have no need to disable irqs.
625 	 */
626 	struct rlimit rlim[RLIM_NLIMITS];
627 
628 #ifdef CONFIG_BSD_PROCESS_ACCT
629 	struct pacct_struct pacct;	/* per-process accounting information */
630 #endif
631 #ifdef CONFIG_TASKSTATS
632 	struct taskstats *stats;
633 #endif
634 #ifdef CONFIG_AUDIT
635 	unsigned audit_tty;
636 	struct tty_audit_buf *tty_audit_buf;
637 #endif
638 #ifdef CONFIG_CGROUPS
639 	/*
640 	 * group_rwsem prevents new tasks from entering the threadgroup and
641 	 * member tasks from exiting, more specifically the setting of
642 	 * PF_EXITING.  fork and exit paths are protected with this rwsem
643 	 * using threadgroup_change_begin/end().  Users which require
644 	 * threadgroup to remain stable should use threadgroup_[un]lock()
645 	 * which also takes care of exec path.  Currently, cgroup is the
646 	 * only user.
647 	 */
648 	struct rw_semaphore group_rwsem;
649 #endif
650 
651 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
652 	int oom_score_adj;	/* OOM kill score adjustment */
653 	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
654 				 * Only settable by CAP_SYS_RESOURCE. */
655 
656 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
657 					 * credential calculations
658 					 * (notably ptrace) */
659 };
660 
661 /* Context switch must be unlocked if interrupts are to be enabled */
662 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
663 # define __ARCH_WANT_UNLOCKED_CTXSW
664 #endif
665 
666 /*
667  * Bits in flags field of signal_struct.
668  */
669 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
670 #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
671 #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
672 /*
673  * Pending notifications to parent.
674  */
675 #define SIGNAL_CLD_STOPPED	0x00000010
676 #define SIGNAL_CLD_CONTINUED	0x00000020
677 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
678 
679 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
680 
681 /* If true, all threads except ->group_exit_task have pending SIGKILL */
682 static inline int signal_group_exit(const struct signal_struct *sig)
683 {
684 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
685 		(sig->group_exit_task != NULL);
686 }
687 
688 /*
689  * Some day this will be a full-fledged user tracking system.
690  */
691 struct user_struct {
692 	atomic_t __count;	/* reference count */
693 	atomic_t processes;	/* How many processes does this user have? */
694 	atomic_t files;		/* How many open files does this user have? */
695 	atomic_t sigpending;	/* How many pending signals does this user have? */
696 #ifdef CONFIG_INOTIFY_USER
697 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
698 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
699 #endif
700 #ifdef CONFIG_FANOTIFY
701 	atomic_t fanotify_listeners;
702 #endif
703 #ifdef CONFIG_EPOLL
704 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
705 #endif
706 #ifdef CONFIG_POSIX_MQUEUE
707 	/* protected by mq_lock	*/
708 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
709 #endif
710 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
711 
712 #ifdef CONFIG_KEYS
713 	struct key *uid_keyring;	/* UID specific keyring */
714 	struct key *session_keyring;	/* UID's default session keyring */
715 #endif
716 
717 	/* Hash table maintenance information */
718 	struct hlist_node uidhash_node;
719 	uid_t uid;
720 	struct user_namespace *user_ns;
721 
722 #ifdef CONFIG_PERF_EVENTS
723 	atomic_long_t locked_vm;
724 #endif
725 };
726 
727 extern int uids_sysfs_init(void);
728 
729 extern struct user_struct *find_user(uid_t);
730 
731 extern struct user_struct root_user;
732 #define INIT_USER (&root_user)
733 
734 
735 struct backing_dev_info;
736 struct reclaim_state;
737 
738 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
739 struct sched_info {
740 	/* cumulative counters */
741 	unsigned long pcount;	      /* # of times run on this cpu */
742 	unsigned long long run_delay; /* time spent waiting on a runqueue */
743 
744 	/* timestamps */
745 	unsigned long long last_arrival, /* when we last ran on a cpu */
746 			   last_queued;	/* when we were last queued to run */
747 };
748 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
749 
750 #ifdef CONFIG_TASK_DELAY_ACCT
751 struct task_delay_info {
752 	spinlock_t	lock;
753 	unsigned int	flags;	/* Private per-task flags */
754 
755 	/* For each stat XXX, add following, aligned appropriately
756 	 *
757 	 * struct timespec XXX_start, XXX_end;
758 	 * u64 XXX_delay;
759 	 * u32 XXX_count;
760 	 *
761 	 * Atomicity of updates to XXX_delay, XXX_count protected by
762 	 * single lock above (split into XXX_lock if contention is an issue).
763 	 */
764 
765 	/*
766 	 * XXX_count is incremented on every XXX operation, the delay
767 	 * associated with the operation is added to XXX_delay.
768 	 * XXX_delay contains the accumulated delay time in nanoseconds.
769 	 */
770 	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
771 	u64 blkio_delay;	/* wait for sync block io completion */
772 	u64 swapin_delay;	/* wait for swapin block io completion */
773 	u32 blkio_count;	/* total count of the number of sync block */
774 				/* io operations performed */
775 	u32 swapin_count;	/* total count of the number of swapin block */
776 				/* io operations performed */
777 
778 	struct timespec freepages_start, freepages_end;
779 	u64 freepages_delay;	/* wait for memory reclaim */
780 	u32 freepages_count;	/* total count of memory reclaim */
781 };
782 #endif	/* CONFIG_TASK_DELAY_ACCT */
783 
784 static inline int sched_info_on(void)
785 {
786 #ifdef CONFIG_SCHEDSTATS
787 	return 1;
788 #elif defined(CONFIG_TASK_DELAY_ACCT)
789 	extern int delayacct_on;
790 	return delayacct_on;
791 #else
792 	return 0;
793 #endif
794 }
795 
796 enum cpu_idle_type {
797 	CPU_IDLE,
798 	CPU_NOT_IDLE,
799 	CPU_NEWLY_IDLE,
800 	CPU_MAX_IDLE_TYPES
801 };
802 
803 /*
804  * Increase resolution of nice-level calculations for 64-bit architectures.
805  * The extra resolution improves shares distribution and load balancing of
806  * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
807  * hierarchies, especially on larger systems. This is not a user-visible change
808  * and does not change the user-interface for setting shares/weights.
809  *
810  * We increase resolution only if we have enough bits to allow this increased
811  * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
812  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
813  * increased costs.
814  */
815 #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
816 # define SCHED_LOAD_RESOLUTION	10
817 # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
818 # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
819 #else
820 # define SCHED_LOAD_RESOLUTION	0
821 # define scale_load(w)		(w)
822 # define scale_load_down(w)	(w)
823 #endif
824 
825 #define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
826 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
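
/*
 * With the extra resolution disabled above, SCHED_LOAD_SHIFT is 10 and
 * SCHED_LOAD_SCALE is 1024 (the load weight of a nice-0 task), and
 * scale_load()/scale_load_down() are identity operations.
 */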
827 
828 /*
829  * Increase resolution of cpu_power calculations
830  */
831 #define SCHED_POWER_SHIFT	10
832 #define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
833 
834 /*
835  * sched-domains (multiprocessor balancing) declarations:
836  */
837 #ifdef CONFIG_SMP
838 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
839 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
840 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
841 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
842 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
843 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
844 #define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
845 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
846 #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
847 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
848 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
849 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
850 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
851 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
852 
853 enum powersavings_balance_level {
854 	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
855 	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
856 					 * first for long running threads
857 					 */
858 	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
859 					 * cpu package for power savings
860 					 */
861 	MAX_POWERSAVINGS_BALANCE_LEVELS
862 };
863 
864 extern int sched_mc_power_savings, sched_smt_power_savings;
865 
866 static inline int sd_balance_for_mc_power(void)
867 {
868 	if (sched_smt_power_savings)
869 		return SD_POWERSAVINGS_BALANCE;
870 
871 	if (!sched_mc_power_savings)
872 		return SD_PREFER_SIBLING;
873 
874 	return 0;
875 }
876 
877 static inline int sd_balance_for_package_power(void)
878 {
879 	if (sched_mc_power_savings | sched_smt_power_savings)
880 		return SD_POWERSAVINGS_BALANCE;
881 
882 	return SD_PREFER_SIBLING;
883 }
884 
885 extern int __weak arch_sd_sibiling_asym_packing(void);
886 
887 /*
888  * Optimise SD flags for power savings:
889  * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
890  * Keep default SD flags if sched_{smt,mc}_power_savings=0
891  */
892 
893 static inline int sd_power_saving_flags(void)
894 {
895 	if (sched_mc_power_savings | sched_smt_power_savings)
896 		return SD_BALANCE_NEWIDLE;
897 
898 	return 0;
899 }
900 
901 struct sched_group_power {
902 	atomic_t ref;
903 	/*
904 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
905 	 * single CPU.
906 	 */
907 	unsigned int power, power_orig;
908 	/*
909 	 * Number of busy cpus in this group.
910 	 */
911 	atomic_t nr_busy_cpus;
912 };
913 
914 struct sched_group {
915 	struct sched_group *next;	/* Must be a circular list */
916 	atomic_t ref;
917 
918 	unsigned int group_weight;
919 	struct sched_group_power *sgp;
920 
921 	/*
922 	 * The CPUs this group covers.
923 	 *
924 	 * NOTE: this field is variable length. (Allocated dynamically
925 	 * by attaching extra space to the end of the structure,
926 	 * depending on how many CPUs the kernel has booted up with)
927 	 */
928 	unsigned long cpumask[0];
929 };
930 
931 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
932 {
933 	return to_cpumask(sg->cpumask);
934 }
935 
936 /**
937  * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
938  * @group: The group whose first cpu is to be returned.
939  */
940 static inline unsigned int group_first_cpu(struct sched_group *group)
941 {
942 	return cpumask_first(sched_group_cpus(group));
943 }
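
/*
 * Illustrative traversal (not part of this header): sched_group lists
 * are circular, so walking a domain's groups looks like:
 *
 *	struct sched_group *sg = sd->groups;
 *	do {
 *		pr_info("first cpu in group: %u\n", group_first_cpu(sg));
 *		sg = sg->next;
 *	} while (sg != sd->groups);
 */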
944 
945 struct sched_domain_attr {
946 	int relax_domain_level;
947 };
948 
949 #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
950 	.relax_domain_level = -1,			\
951 }
952 
953 extern int sched_domain_level_max;
954 
955 struct sched_domain {
956 	/* These fields must be setup */
957 	struct sched_domain *parent;	/* top domain must be null terminated */
958 	struct sched_domain *child;	/* bottom domain must be null terminated */
959 	struct sched_group *groups;	/* the balancing groups of the domain */
960 	unsigned long min_interval;	/* Minimum balance interval ms */
961 	unsigned long max_interval;	/* Maximum balance interval ms */
962 	unsigned int busy_factor;	/* less balancing by factor if busy */
963 	unsigned int imbalance_pct;	/* No balance until over watermark */
964 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
965 	unsigned int busy_idx;
966 	unsigned int idle_idx;
967 	unsigned int newidle_idx;
968 	unsigned int wake_idx;
969 	unsigned int forkexec_idx;
970 	unsigned int smt_gain;
971 	int flags;			/* See SD_* */
972 	int level;
973 
974 	/* Runtime fields. */
975 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
976 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
977 	unsigned int nr_balance_failed; /* initialise to 0 */
978 
979 	u64 last_update;
980 
981 #ifdef CONFIG_SCHEDSTATS
982 	/* load_balance() stats */
983 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
984 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
985 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
986 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
987 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
988 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
989 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
990 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
991 
992 	/* Active load balancing */
993 	unsigned int alb_count;
994 	unsigned int alb_failed;
995 	unsigned int alb_pushed;
996 
997 	/* SD_BALANCE_EXEC stats */
998 	unsigned int sbe_count;
999 	unsigned int sbe_balanced;
1000 	unsigned int sbe_pushed;
1001 
1002 	/* SD_BALANCE_FORK stats */
1003 	unsigned int sbf_count;
1004 	unsigned int sbf_balanced;
1005 	unsigned int sbf_pushed;
1006 
1007 	/* try_to_wake_up() stats */
1008 	unsigned int ttwu_wake_remote;
1009 	unsigned int ttwu_move_affine;
1010 	unsigned int ttwu_move_balance;
1011 #endif
1012 #ifdef CONFIG_SCHED_DEBUG
1013 	char *name;
1014 #endif
1015 	union {
1016 		void *private;		/* used during construction */
1017 		struct rcu_head rcu;	/* used during destruction */
1018 	};
1019 
1020 	unsigned int span_weight;
1021 	/*
1022 	 * Span of all CPUs in this domain.
1023 	 *
1024 	 * NOTE: this field is variable length. (Allocated dynamically
1025 	 * by attaching extra space to the end of the structure,
1026 	 * depending on how many CPUs the kernel has booted up with)
1027 	 */
1028 	unsigned long span[0];
1029 };
1030 
1031 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1032 {
1033 	return to_cpumask(sd->span);
1034 }
1035 
1036 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1037 				    struct sched_domain_attr *dattr_new);
1038 
1039 /* Allocate an array of sched domains, for partition_sched_domains(). */
1040 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1041 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1042 
1043 /* Test a flag in parent sched domain */
1044 static inline int test_sd_parent(struct sched_domain *sd, int flag)
1045 {
1046 	if (sd->parent && (sd->parent->flags & flag))
1047 		return 1;
1048 
1049 	return 0;
1050 }
1051 
1052 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
1053 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
1054 
1055 #else /* CONFIG_SMP */
1056 
1057 struct sched_domain_attr;
1058 
1059 static inline void
1060 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1061 			struct sched_domain_attr *dattr_new)
1062 {
1063 }
1064 #endif	/* !CONFIG_SMP */
1065 
1066 
1067 struct io_context;			/* See blkdev.h */
1068 
1069 
1070 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1071 extern void prefetch_stack(struct task_struct *t);
1072 #else
1073 static inline void prefetch_stack(struct task_struct *t) { }
1074 #endif
1075 
1076 struct audit_context;		/* See audit.c */
1077 struct mempolicy;
1078 struct pipe_inode_info;
1079 struct uts_namespace;
1080 
1081 struct rq;
1082 struct sched_domain;
1083 
1084 /*
1085  * wake flags
1086  */
1087 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
1088 #define WF_FORK		0x02		/* child wakeup after fork */
1089 #define WF_MIGRATED	0x04		/* internal use, task got migrated */
1090 
1091 #define ENQUEUE_WAKEUP		1
1092 #define ENQUEUE_HEAD		2
1093 #ifdef CONFIG_SMP
1094 #define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
1095 #else
1096 #define ENQUEUE_WAKING		0
1097 #endif
1098 
1099 #define DEQUEUE_SLEEP		1
1100 
1101 struct sched_class {
1102 	const struct sched_class *next;
1103 
1104 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1105 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1106 	void (*yield_task) (struct rq *rq);
1107 	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1108 
1109 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1110 
1111 	struct task_struct * (*pick_next_task) (struct rq *rq);
1112 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1113 
1114 #ifdef CONFIG_SMP
1115 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
1116 
1117 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1118 	void (*post_schedule) (struct rq *this_rq);
1119 	void (*task_waking) (struct task_struct *task);
1120 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1121 
1122 	void (*set_cpus_allowed)(struct task_struct *p,
1123 				 const struct cpumask *newmask);
1124 
1125 	void (*rq_online)(struct rq *rq);
1126 	void (*rq_offline)(struct rq *rq);
1127 #endif
1128 
1129 	void (*set_curr_task) (struct rq *rq);
1130 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1131 	void (*task_fork) (struct task_struct *p);
1132 
1133 	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1134 	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1135 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1136 			     int oldprio);
1137 
1138 	unsigned int (*get_rr_interval) (struct rq *rq,
1139 					 struct task_struct *task);
1140 
1141 #ifdef CONFIG_FAIR_GROUP_SCHED
1142 	void (*task_move_group) (struct task_struct *p, int on_rq);
1143 #endif
1144 };
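
/*
 * Scheduling classes are static singletons chained via ->next in
 * priority order (stop, rt, fair, idle).  A minimal sketch, assuming
 * the per-class callbacks are defined elsewhere:
 *
 *	static const struct sched_class fair_sched_class = {
 *		.next		= &idle_sched_class,
 *		.enqueue_task	= enqueue_task_fair,
 *		.dequeue_task	= dequeue_task_fair,
 *		.pick_next_task	= pick_next_task_fair,
 *		...
 *	};
 */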
1145 
1146 struct load_weight {
1147 	unsigned long weight, inv_weight;
1148 };
1149 
1150 #ifdef CONFIG_SCHEDSTATS
1151 struct sched_statistics {
1152 	u64			wait_start;
1153 	u64			wait_max;
1154 	u64			wait_count;
1155 	u64			wait_sum;
1156 	u64			iowait_count;
1157 	u64			iowait_sum;
1158 
1159 	u64			sleep_start;
1160 	u64			sleep_max;
1161 	s64			sum_sleep_runtime;
1162 
1163 	u64			block_start;
1164 	u64			block_max;
1165 	u64			exec_max;
1166 	u64			slice_max;
1167 
1168 	u64			nr_migrations_cold;
1169 	u64			nr_failed_migrations_affine;
1170 	u64			nr_failed_migrations_running;
1171 	u64			nr_failed_migrations_hot;
1172 	u64			nr_forced_migrations;
1173 
1174 	u64			nr_wakeups;
1175 	u64			nr_wakeups_sync;
1176 	u64			nr_wakeups_migrate;
1177 	u64			nr_wakeups_local;
1178 	u64			nr_wakeups_remote;
1179 	u64			nr_wakeups_affine;
1180 	u64			nr_wakeups_affine_attempts;
1181 	u64			nr_wakeups_passive;
1182 	u64			nr_wakeups_idle;
1183 };
1184 #endif
1185 
1186 struct sched_entity {
1187 	struct load_weight	load;		/* for load-balancing */
1188 	struct rb_node		run_node;
1189 	struct list_head	group_node;
1190 	unsigned int		on_rq;
1191 
1192 	u64			exec_start;
1193 	u64			sum_exec_runtime;
1194 	u64			vruntime;
1195 	u64			prev_sum_exec_runtime;
1196 
1197 	u64			nr_migrations;
1198 
1199 #ifdef CONFIG_SCHEDSTATS
1200 	struct sched_statistics statistics;
1201 #endif
1202 
1203 #ifdef CONFIG_FAIR_GROUP_SCHED
1204 	struct sched_entity	*parent;
1205 	/* rq on which this entity is (to be) queued: */
1206 	struct cfs_rq		*cfs_rq;
1207 	/* rq "owned" by this entity/group: */
1208 	struct cfs_rq		*my_q;
1209 #endif
1210 };
1211 
1212 struct sched_rt_entity {
1213 	struct list_head run_list;
1214 	unsigned long timeout;
1215 	unsigned int time_slice;
1216 	int nr_cpus_allowed;
1217 
1218 	struct sched_rt_entity *back;
1219 #ifdef CONFIG_RT_GROUP_SCHED
1220 	struct sched_rt_entity	*parent;
1221 	/* rq on which this entity is (to be) queued: */
1222 	struct rt_rq		*rt_rq;
1223 	/* rq "owned" by this entity/group: */
1224 	struct rt_rq		*my_q;
1225 #endif
1226 };
1227 
1228 struct rcu_node;
1229 
1230 enum perf_event_task_context {
1231 	perf_invalid_context = -1,
1232 	perf_hw_context = 0,
1233 	perf_sw_context,
1234 	perf_nr_task_contexts,
1235 };
1236 
1237 struct task_struct {
1238 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1239 	void *stack;
1240 	atomic_t usage;
1241 	unsigned int flags;	/* per process flags, defined below */
1242 	unsigned int ptrace;
1243 
1244 #ifdef CONFIG_SMP
1245 	struct llist_node wake_entry;
1246 	int on_cpu;
1247 #endif
1248 	int on_rq;
1249 
1250 	int prio, static_prio, normal_prio;
1251 	unsigned int rt_priority;
1252 	const struct sched_class *sched_class;
1253 	struct sched_entity se;
1254 	struct sched_rt_entity rt;
1255 
1256 #ifdef CONFIG_PREEMPT_NOTIFIERS
1257 	/* list of struct preempt_notifier: */
1258 	struct hlist_head preempt_notifiers;
1259 #endif
1260 
1261 	/*
1262 	 * fpu_counter contains the number of consecutive context switches
1263 	 * during which the FPU is used. If this is over a threshold, the lazy
1264 	 * FPU saving becomes unlazy to save the trap. This is an unsigned char
1265 	 * so that after 256 times the counter wraps and the behavior turns
1266 	 * lazy again; this deals with bursty apps that only use the FPU for
1267 	 * a short time.
1268 	 */
1269 	unsigned char fpu_counter;
1270 #ifdef CONFIG_BLK_DEV_IO_TRACE
1271 	unsigned int btrace_seq;
1272 #endif
1273 
1274 	unsigned int policy;
1275 	cpumask_t cpus_allowed;
1276 
1277 #ifdef CONFIG_PREEMPT_RCU
1278 	int rcu_read_lock_nesting;
1279 	char rcu_read_unlock_special;
1280 	struct list_head rcu_node_entry;
1281 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1282 #ifdef CONFIG_TREE_PREEMPT_RCU
1283 	struct rcu_node *rcu_blocked_node;
1284 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1285 #ifdef CONFIG_RCU_BOOST
1286 	struct rt_mutex *rcu_boost_mutex;
1287 #endif /* #ifdef CONFIG_RCU_BOOST */
1288 
1289 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1290 	struct sched_info sched_info;
1291 #endif
1292 
1293 	struct list_head tasks;
1294 #ifdef CONFIG_SMP
1295 	struct plist_node pushable_tasks;
1296 #endif
1297 
1298 	struct mm_struct *mm, *active_mm;
1299 #ifdef CONFIG_COMPAT_BRK
1300 	unsigned brk_randomized:1;
1301 #endif
1302 #if defined(SPLIT_RSS_COUNTING)
1303 	struct task_rss_stat	rss_stat;
1304 #endif
1305 /* task state */
1306 	int exit_state;
1307 	int exit_code, exit_signal;
1308 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1309 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
1310 	/* ??? */
1311 	unsigned int personality;
1312 	unsigned did_exec:1;
1313 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1314 				 * execve */
1315 	unsigned in_iowait:1;
1316 
1317 
1318 	/* Revert to default priority/policy when forking */
1319 	unsigned sched_reset_on_fork:1;
1320 	unsigned sched_contributes_to_load:1;
1321 
1322 	pid_t pid;
1323 	pid_t tgid;
1324 
1325 #ifdef CONFIG_CC_STACKPROTECTOR
1326 	/* Canary value for the -fstack-protector gcc feature */
1327 	unsigned long stack_canary;
1328 #endif
1329 
1330 	/*
1331 	 * pointers to (original) parent process, youngest child, younger sibling,
1332 	 * older sibling, respectively.  (p->father can be replaced with
1333 	 * p->real_parent->pid)
1334 	 */
1335 	struct task_struct __rcu *real_parent; /* real parent process */
1336 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1337 	/*
1338 	 * children/sibling forms the list of my natural children
1339 	 */
1340 	struct list_head children;	/* list of my children */
1341 	struct list_head sibling;	/* linkage in my parent's children list */
1342 	struct task_struct *group_leader;	/* threadgroup leader */
1343 
1344 	/*
1345 	 * ptraced is the list of tasks this task is using ptrace on.
1346 	 * This includes both natural children and PTRACE_ATTACH targets.
1347 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1348 	 */
1349 	struct list_head ptraced;
1350 	struct list_head ptrace_entry;
1351 
1352 	/* PID/PID hash table linkage. */
1353 	struct pid_link pids[PIDTYPE_MAX];
1354 	struct list_head thread_group;
1355 
1356 	struct completion *vfork_done;		/* for vfork() */
1357 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1358 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1359 
1360 	cputime_t utime, stime, utimescaled, stimescaled;
1361 	cputime_t gtime;
1362 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1363 	cputime_t prev_utime, prev_stime;
1364 #endif
1365 	unsigned long nvcsw, nivcsw; /* context switch counts */
1366 	struct timespec start_time; 		/* monotonic time */
1367 	struct timespec real_start_time;	/* boot based time */
1368 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1369 	unsigned long min_flt, maj_flt;
1370 
1371 	struct task_cputime cputime_expires;
1372 	struct list_head cpu_timers[3];
1373 
1374 /* process credentials */
1375 	const struct cred __rcu *real_cred; /* objective and real subjective task
1376 					 * credentials (COW) */
1377 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1378 					 * credentials (COW) */
1379 	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1380 
1381 	char comm[TASK_COMM_LEN]; /* executable name excluding path
1382 				     - access with [gs]et_task_comm (which lock
1383 				       it with task_lock())
1384 				     - initialized normally by setup_new_exec */
1385 /* file system info */
1386 	int link_count, total_link_count;
1387 #ifdef CONFIG_SYSVIPC
1388 /* ipc stuff */
1389 	struct sysv_sem sysvsem;
1390 #endif
1391 #ifdef CONFIG_DETECT_HUNG_TASK
1392 /* hung task detection */
1393 	unsigned long last_switch_count;
1394 #endif
1395 /* CPU-specific state of this task */
1396 	struct thread_struct thread;
1397 /* filesystem information */
1398 	struct fs_struct *fs;
1399 /* open file information */
1400 	struct files_struct *files;
1401 /* namespaces */
1402 	struct nsproxy *nsproxy;
1403 /* signal handlers */
1404 	struct signal_struct *signal;
1405 	struct sighand_struct *sighand;
1406 
1407 	sigset_t blocked, real_blocked;
1408 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1409 	struct sigpending pending;
1410 
1411 	unsigned long sas_ss_sp;
1412 	size_t sas_ss_size;
1413 	int (*notifier)(void *priv);
1414 	void *notifier_data;
1415 	sigset_t *notifier_mask;
1416 	struct audit_context *audit_context;
1417 #ifdef CONFIG_AUDITSYSCALL
1418 	uid_t loginuid;
1419 	unsigned int sessionid;
1420 #endif
1421 	seccomp_t seccomp;
1422 
1423 /* Thread group tracking */
1424 	u32 parent_exec_id;
1425 	u32 self_exec_id;
1426 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1427  * mempolicy */
1428 	spinlock_t alloc_lock;
1429 
1430 #ifdef CONFIG_GENERIC_HARDIRQS
1431 	/* IRQ handler threads */
1432 	struct irqaction *irqaction;
1433 #endif
1434 
1435 	/* Protection of the PI data structures: */
1436 	raw_spinlock_t pi_lock;
1437 
1438 #ifdef CONFIG_RT_MUTEXES
1439 	/* PI waiters blocked on a rt_mutex held by this task */
1440 	struct plist_head pi_waiters;
1441 	/* Deadlock detection and priority inheritance handling */
1442 	struct rt_mutex_waiter *pi_blocked_on;
1443 #endif
1444 
1445 #ifdef CONFIG_DEBUG_MUTEXES
1446 	/* mutex deadlock detection */
1447 	struct mutex_waiter *blocked_on;
1448 #endif
1449 #ifdef CONFIG_TRACE_IRQFLAGS
1450 	unsigned int irq_events;
1451 	unsigned long hardirq_enable_ip;
1452 	unsigned long hardirq_disable_ip;
1453 	unsigned int hardirq_enable_event;
1454 	unsigned int hardirq_disable_event;
1455 	int hardirqs_enabled;
1456 	int hardirq_context;
1457 	unsigned long softirq_disable_ip;
1458 	unsigned long softirq_enable_ip;
1459 	unsigned int softirq_disable_event;
1460 	unsigned int softirq_enable_event;
1461 	int softirqs_enabled;
1462 	int softirq_context;
1463 #endif
1464 #ifdef CONFIG_LOCKDEP
1465 # define MAX_LOCK_DEPTH 48UL
1466 	u64 curr_chain_key;
1467 	int lockdep_depth;
1468 	unsigned int lockdep_recursion;
1469 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1470 	gfp_t lockdep_reclaim_gfp;
1471 #endif
1472 
1473 /* journalling filesystem info */
1474 	void *journal_info;
1475 
1476 /* stacked block device info */
1477 	struct bio_list *bio_list;
1478 
1479 #ifdef CONFIG_BLOCK
1480 /* stack plugging */
1481 	struct blk_plug *plug;
1482 #endif
1483 
1484 /* VM state */
1485 	struct reclaim_state *reclaim_state;
1486 
1487 	struct backing_dev_info *backing_dev_info;
1488 
1489 	struct io_context *io_context;
1490 
1491 	unsigned long ptrace_message;
1492 	siginfo_t *last_siginfo; /* For ptrace use.  */
1493 	struct task_io_accounting ioac;
1494 #if defined(CONFIG_TASK_XACCT)
1495 	u64 acct_rss_mem1;	/* accumulated rss usage */
1496 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1497 	cputime_t acct_timexpd;	/* stime + utime since last update */
1498 #endif
1499 #ifdef CONFIG_CPUSETS
1500 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1501 	int mems_allowed_change_disable;
1502 	int cpuset_mem_spread_rotor;
1503 	int cpuset_slab_spread_rotor;
1504 #endif
1505 #ifdef CONFIG_CGROUPS
1506 	/* Control Group info protected by css_set_lock */
1507 	struct css_set __rcu *cgroups;
1508 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1509 	struct list_head cg_list;
1510 #endif
1511 #ifdef CONFIG_FUTEX
1512 	struct robust_list_head __user *robust_list;
1513 #ifdef CONFIG_COMPAT
1514 	struct compat_robust_list_head __user *compat_robust_list;
1515 #endif
1516 	struct list_head pi_state_list;
1517 	struct futex_pi_state *pi_state_cache;
1518 #endif
1519 #ifdef CONFIG_PERF_EVENTS
1520 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1521 	struct mutex perf_event_mutex;
1522 	struct list_head perf_event_list;
1523 #endif
1524 #ifdef CONFIG_NUMA
1525 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1526 	short il_next;
1527 	short pref_node_fork;
1528 #endif
1529 	struct rcu_head rcu;
1530 
1531 	/*
1532 	 * cache last used pipe for splice
1533 	 */
1534 	struct pipe_inode_info *splice_pipe;
1535 #ifdef	CONFIG_TASK_DELAY_ACCT
1536 	struct task_delay_info *delays;
1537 #endif
1538 #ifdef CONFIG_FAULT_INJECTION
1539 	int make_it_fail;
1540 #endif
1541 	/*
1542 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1543 	 * balance_dirty_pages() for some dirty throttling pause
1544 	 */
1545 	int nr_dirtied;
1546 	int nr_dirtied_pause;
1547 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1548 
1549 #ifdef CONFIG_LATENCYTOP
1550 	int latency_record_count;
1551 	struct latency_record latency_record[LT_SAVECOUNT];
1552 #endif
1553 	/*
1554 	 * time slack values; these are used to round up poll() and
1555 	 * select() etc timeout values. These are in nanoseconds.
1556 	 */
1557 	unsigned long timer_slack_ns;
1558 	unsigned long default_timer_slack_ns;
1559 
1560 	struct list_head	*scm_work_list;
1561 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1562 	/* Index of current stored address in ret_stack */
1563 	int curr_ret_stack;
1564 	/* Stack of return addresses for return function tracing */
1565 	struct ftrace_ret_stack	*ret_stack;
1566 	/* time stamp for last schedule */
1567 	unsigned long long ftrace_timestamp;
1568 	/*
1569 	 * Number of functions that haven't been traced
1570 	 * because of depth overrun.
1571 	 */
1572 	atomic_t trace_overrun;
1573 	/* Pause for the tracing */
1574 	atomic_t tracing_graph_pause;
1575 #endif
1576 #ifdef CONFIG_TRACING
1577 	/* state flags for use by tracers */
1578 	unsigned long trace;
1579 	/* bitmask and counter of trace recursion */
1580 	unsigned long trace_recursion;
1581 #endif /* CONFIG_TRACING */
1582 #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1583 	struct memcg_batch_info {
1584 		int do_batch;	/* incremented when batch uncharge started */
1585 		struct mem_cgroup *memcg; /* target memcg of uncharge */
1586 		unsigned long nr_pages;	/* uncharged usage */
1587 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1588 	} memcg_batch;
1589 #endif
1590 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1591 	atomic_t ptrace_bp_refcnt;
1592 #endif
1593 };
1594 
1595 /* Future-safe accessor for struct task_struct's cpus_allowed. */
1596 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1597 
1598 /*
1599  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1600  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1601  * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1602  * values are inverted: lower p->prio value means higher priority.
1603  *
1604  * The MAX_USER_RT_PRIO value allows the actual maximum
1605  * RT priority to be separate from the value exported to
1606  * user-space.  This allows kernel threads to set their
1607  * priority to a value higher than any user task. Note:
1608  * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1609  */
1610 
1611 #define MAX_USER_RT_PRIO	100
1612 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
1613 
1614 #define MAX_PRIO		(MAX_RT_PRIO + 40)
1615 #define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
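
/*
 * Worked example: MAX_RT_PRIO is 100, so MAX_PRIO is 140.  A nice value
 * maps to a static priority as MAX_RT_PRIO + 20 + nice, so nice 0 lands
 * on DEFAULT_PRIO (120) and nice +19 on 139.
 */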
1616 
1617 static inline int rt_prio(int prio)
1618 {
1619 	if (unlikely(prio < MAX_RT_PRIO))
1620 		return 1;
1621 	return 0;
1622 }
1623 
1624 static inline int rt_task(struct task_struct *p)
1625 {
1626 	return rt_prio(p->prio);
1627 }
1628 
1629 static inline struct pid *task_pid(struct task_struct *task)
1630 {
1631 	return task->pids[PIDTYPE_PID].pid;
1632 }
1633 
1634 static inline struct pid *task_tgid(struct task_struct *task)
1635 {
1636 	return task->group_leader->pids[PIDTYPE_PID].pid;
1637 }
1638 
1639 /*
1640  * Without tasklist or rcu lock it is not safe to dereference
1641  * the result of task_pgrp/task_session even if task == current,
1642  * we can race with another thread doing sys_setsid/sys_setpgid.
1643  */
1644 static inline struct pid *task_pgrp(struct task_struct *task)
1645 {
1646 	return task->group_leader->pids[PIDTYPE_PGID].pid;
1647 }
1648 
1649 static inline struct pid *task_session(struct task_struct *task)
1650 {
1651 	return task->group_leader->pids[PIDTYPE_SID].pid;
1652 }
1653 
1654 struct pid_namespace;
1655 
1656 /*
1657  * Helpers to get the task's different pids as they are seen
1658  * from various namespaces:
1659  *
1660  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1661  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace
1662  *                     of current;
1663  * task_xid_nr_ns()  : id seen from the specified namespace;
1664  *
1665  * set_task_vxid()   : assigns a virtual id to a task.
1666  *
1667  * See also pid_nr() etc. in include/linux/pid.h.
1668  */
1669 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1670 			struct pid_namespace *ns);
1671 
1672 static inline pid_t task_pid_nr(struct task_struct *tsk)
1673 {
1674 	return tsk->pid;
1675 }
1676 
1677 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1678 					struct pid_namespace *ns)
1679 {
1680 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1681 }
1682 
1683 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1684 {
1685 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1686 }
1687 
1688 
1689 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1690 {
1691 	return tsk->tgid;
1692 }
1693 
1694 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1695 
1696 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1697 {
1698 	return pid_vnr(task_tgid(tsk));
1699 }
1700 
1701 
1702 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1703 					struct pid_namespace *ns)
1704 {
1705 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1706 }
1707 
1708 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1709 {
1710 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1711 }
1712 
1713 
1714 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1715 					struct pid_namespace *ns)
1716 {
1717 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1718 }
1719 
1720 static inline pid_t task_session_vnr(struct task_struct *tsk)
1721 {
1722 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1723 }
1724 
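/*
 * Example (editorial sketch, not from the original source): reporting a
 * task's global pid alongside the pid that current's pid namespace uses
 * for it.  The tsk pointer and the pr_info() format are illustrative.
 *
 *	pr_info("%s: global pid %d, pid seen by current %d\n",
 *		tsk->comm, task_pid_nr(tsk), task_pid_vnr(tsk));
 */
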
1725 /* obsolete, do not use */
1726 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1727 {
1728 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1729 }
1730 
1731 /**
1732  * pid_alive - check that a task structure is not stale
1733  * @p: Task structure to be checked.
1734  *
1735  * Test if a process is not yet dead (at most in zombie state).
1736  * If pid_alive fails, then pointers within the task structure
1737  * can be stale and must not be dereferenced.
1738  */
1739 static inline int pid_alive(struct task_struct *p)
1740 {
1741 	return p->pids[PIDTYPE_PID].pid != NULL;
1742 }
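
/*
 * Example (editorial sketch): pid_alive() is typically consulted under
 * rcu_read_lock() before following pointers out of a task_struct that
 * may be exiting:
 *
 *	rcu_read_lock();
 *	if (pid_alive(p))
 *		pgrp = task_pgrp(p);
 *	rcu_read_unlock();
 *
 * (pgrp must not be dereferenced once the RCU read lock is dropped.)
 */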
1743 
1744 /**
1745  * is_global_init - check if a task structure is init
1746  * @tsk: Task structure to be checked.
1747  *
1748  * Check if a task structure is the first user space task the kernel created.
1749  */
1750 static inline int is_global_init(struct task_struct *tsk)
1751 {
1752 	return tsk->pid == 1;
1753 }
1754 
1755 /*
1756  * is_container_init:
1757  * check whether the task is init in its own pid namespace.
1758  */
1759 extern int is_container_init(struct task_struct *tsk);
1760 
1761 extern struct pid *cad_pid;
1762 
1763 extern void free_task(struct task_struct *tsk);
1764 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1765 
1766 extern void __put_task_struct(struct task_struct *t);
1767 
1768 static inline void put_task_struct(struct task_struct *t)
1769 {
1770 	if (atomic_dec_and_test(&t->usage))
1771 		__put_task_struct(t);
1772 }
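
/*
 * Example (editorial sketch): get_task_struct()/put_task_struct() pair
 * like any other reference count, letting a task pointer outlive the
 * RCU section in which it was looked up:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *	if (p) {
 *		use(p);
 *		put_task_struct(p);
 *	}
 *
 * (use() is a hypothetical consumer of the stable reference.)
 */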
1773 
1774 extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1775 extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
1776 
1777 /*
1778  * Per process flags
1779  */
1780 #define PF_STARTING	0x00000002	/* being created */
1781 #define PF_EXITING	0x00000004	/* getting shut down */
1782 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1783 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1784 #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
1785 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1786 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1787 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1788 #define PF_DUMPCORE	0x00000200	/* dumped core */
1789 #define PF_SIGNALED	0x00000400	/* killed by a signal */
1790 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
1791 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
1792 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1793 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1794 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
1795 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1796 #define PF_KSWAPD	0x00040000	/* I am kswapd */
1797 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1798 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1799 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1800 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1801 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1802 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1803 #define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
1804 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1805 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
1806 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1807 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
1808 
1809 /*
1810  * Only the _current_ task can read/write tsk->flags, but other
1811  * tasks can access tsk->flags read-only, for example with
1812  * tsk_used_math() (as during threaded core dumping).
1813  * There is, however, an exception to this rule during ptrace
1814  * and fork: the ptracer task is allowed to write to the
1815  * child->flags of its traced child (the same goes for fork, where
1816  * the parent can write to child->flags), because we're guaranteed
1817  * the child is not running and thus not changing child->flags
1818  * at the same time the parent does.
1819  */
1820 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1821 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1822 #define clear_used_math() clear_stopped_child_used_math(current)
1823 #define set_used_math() set_stopped_child_used_math(current)
1824 #define conditional_stopped_child_used_math(condition, child) \
1825 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1826 #define conditional_used_math(condition) \
1827 	conditional_stopped_child_used_math(condition, current)
1828 #define copy_to_stopped_child_used_math(child) \
1829 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1830 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1831 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1832 #define used_math() tsk_used_math(current)
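
/*
 * Example (editorial sketch): per the note above, tsk_used_math()
 * returns 0 or PF_USED_MATH, so normalize it before treating the result
 * as a strict 0/1 boolean:
 *
 *	int fpu_used = !!tsk_used_math(p);
 */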
1833 
1834 /*
1835  * task->jobctl flags
1836  */
1837 #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1838 
1839 #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1840 #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1841 #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
1842 #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1843 #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1844 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1845 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1846 
1847 #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1848 #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1849 #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
1850 #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1851 #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1852 #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1853 #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1854 
1855 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1856 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1857 
1858 extern bool task_set_jobctl_pending(struct task_struct *task,
1859 				    unsigned int mask);
1860 extern void task_clear_jobctl_trapping(struct task_struct *task);
1861 extern void task_clear_jobctl_pending(struct task_struct *task,
1862 				      unsigned int mask);
1863 
1864 #ifdef CONFIG_PREEMPT_RCU
1865 
1866 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1867 #define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
1868 #define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
1869 
1870 static inline void rcu_copy_process(struct task_struct *p)
1871 {
1872 	p->rcu_read_lock_nesting = 0;
1873 	p->rcu_read_unlock_special = 0;
1874 #ifdef CONFIG_TREE_PREEMPT_RCU
1875 	p->rcu_blocked_node = NULL;
1876 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1877 #ifdef CONFIG_RCU_BOOST
1878 	p->rcu_boost_mutex = NULL;
1879 #endif /* #ifdef CONFIG_RCU_BOOST */
1880 	INIT_LIST_HEAD(&p->rcu_node_entry);
1881 }
1882 
1883 #else
1884 
1885 static inline void rcu_copy_process(struct task_struct *p)
1886 {
1887 }
1888 
1889 #endif
1890 
1891 #ifdef CONFIG_SMP
1892 extern void do_set_cpus_allowed(struct task_struct *p,
1893 			       const struct cpumask *new_mask);
1894 
1895 extern int set_cpus_allowed_ptr(struct task_struct *p,
1896 				const struct cpumask *new_mask);
1897 #else
1898 static inline void do_set_cpus_allowed(struct task_struct *p,
1899 				      const struct cpumask *new_mask)
1900 {
1901 }
1902 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1903 				       const struct cpumask *new_mask)
1904 {
1905 	if (!cpumask_test_cpu(0, new_mask))
1906 		return -EINVAL;
1907 	return 0;
1908 }
1909 #endif
1910 
1911 #ifndef CONFIG_CPUMASK_OFFSTACK
1912 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1913 {
1914 	return set_cpus_allowed_ptr(p, &new_mask);
1915 }
1916 #endif
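
/*
 * Example (editorial sketch): pinning a task to one CPU with the
 * cpumask-pointer interface; cpumask_of() yields a constant mask
 * containing just that CPU:
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(cpu)) < 0)
 *		pr_warn("could not pin %s to CPU %d\n", p->comm, cpu);
 */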
1917 
1918 /*
1919  * Do not use outside of architecture code which knows its limitations.
1920  *
1921  * sched_clock() makes no promise of monotonicity or bounded drift
1922  * between CPUs, and using it (which you should not) requires disabling IRQs.
1923  *
1924  * Please use one of the three interfaces below.
1925  */
1926 extern unsigned long long notrace sched_clock(void);
1927 /*
1928  * See the comment in kernel/sched_clock.c
1929  */
1930 extern u64 cpu_clock(int cpu);
1931 extern u64 local_clock(void);
1932 extern u64 sched_clock_cpu(int cpu);
1933 
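/*
 * Example (editorial sketch): timing a code section with local_clock(),
 * whose nanosecond values are only comparable on the local CPU; the
 * do_something() being timed is hypothetical:
 *
 *	u64 t0 = local_clock();
 *	do_something();
 *	pr_debug("took %llu ns\n", local_clock() - t0);
 */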
1934 
1935 extern void sched_clock_init(void);
1936 
1937 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1938 static inline void sched_clock_tick(void)
1939 {
1940 }
1941 
1942 static inline void sched_clock_idle_sleep_event(void)
1943 {
1944 }
1945 
1946 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1947 {
1948 }
1949 #else
1950 /*
1951  * Architectures can set this to 1 if they have selected
1952  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig
1953  * but then discover during bootup that sched_clock()
1954  * is reliable after all:
1955  */
1956 extern int sched_clock_stable;
1957 
1958 extern void sched_clock_tick(void);
1959 extern void sched_clock_idle_sleep_event(void);
1960 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1961 #endif
1962 
1963 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1964 /*
1965  * An interface for runtime opt-in to irq time accounting based on
1966  * sched_clock.  The opt-in is explicit so as not to incur a performance
1967  * penalty with slow sched_clocks.
1968  */
1969 extern void enable_sched_clock_irqtime(void);
1970 extern void disable_sched_clock_irqtime(void);
1971 #else
1972 static inline void enable_sched_clock_irqtime(void) {}
1973 static inline void disable_sched_clock_irqtime(void) {}
1974 #endif
1975 
1976 extern unsigned long long
1977 task_sched_runtime(struct task_struct *task);
1978 
1979 /* sched_exec is called by processes performing an exec */
1980 #ifdef CONFIG_SMP
1981 extern void sched_exec(void);
1982 #else
1983 #define sched_exec()   {}
1984 #endif
1985 
1989 #ifdef CONFIG_HOTPLUG_CPU
1990 extern void idle_task_exit(void);
1991 #else
1992 static inline void idle_task_exit(void) {}
1993 #endif
1994 
1995 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1996 extern void wake_up_idle_cpu(int cpu);
1997 #else
1998 static inline void wake_up_idle_cpu(int cpu) { }
1999 #endif
2000 
2001 extern unsigned int sysctl_sched_latency;
2002 extern unsigned int sysctl_sched_min_granularity;
2003 extern unsigned int sysctl_sched_wakeup_granularity;
2004 extern unsigned int sysctl_sched_child_runs_first;
2005 
2006 enum sched_tunable_scaling {
2007 	SCHED_TUNABLESCALING_NONE,
2008 	SCHED_TUNABLESCALING_LOG,
2009 	SCHED_TUNABLESCALING_LINEAR,
2010 	SCHED_TUNABLESCALING_END,
2011 };
2012 extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
2013 
2014 #ifdef CONFIG_SCHED_DEBUG
2015 extern unsigned int sysctl_sched_migration_cost;
2016 extern unsigned int sysctl_sched_nr_migrate;
2017 extern unsigned int sysctl_sched_time_avg;
2018 extern unsigned int sysctl_timer_migration;
2019 extern unsigned int sysctl_sched_shares_window;
2020 
2021 int sched_proc_update_handler(struct ctl_table *table, int write,
2022 		void __user *buffer, size_t *length,
2023 		loff_t *ppos);
2024 #endif
2025 #ifdef CONFIG_SCHED_DEBUG
2026 static inline unsigned int get_sysctl_timer_migration(void)
2027 {
2028 	return sysctl_timer_migration;
2029 }
2030 #else
2031 static inline unsigned int get_sysctl_timer_migration(void)
2032 {
2033 	return 1;
2034 }
2035 #endif
2036 extern unsigned int sysctl_sched_rt_period;
2037 extern int sysctl_sched_rt_runtime;
2038 
2039 int sched_rt_handler(struct ctl_table *table, int write,
2040 		void __user *buffer, size_t *lenp,
2041 		loff_t *ppos);
2042 
2043 #ifdef CONFIG_SCHED_AUTOGROUP
2044 extern unsigned int sysctl_sched_autogroup_enabled;
2045 
2046 extern void sched_autogroup_create_attach(struct task_struct *p);
2047 extern void sched_autogroup_detach(struct task_struct *p);
2048 extern void sched_autogroup_fork(struct signal_struct *sig);
2049 extern void sched_autogroup_exit(struct signal_struct *sig);
2050 #ifdef CONFIG_PROC_FS
2051 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2052 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
2053 #endif
2054 #else
2055 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2056 static inline void sched_autogroup_detach(struct task_struct *p) { }
2057 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2058 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2059 #endif
2060 
2061 #ifdef CONFIG_CFS_BANDWIDTH
2062 extern unsigned int sysctl_sched_cfs_bandwidth_slice;
2063 #endif
2064 
2065 #ifdef CONFIG_RT_MUTEXES
2066 extern int rt_mutex_getprio(struct task_struct *p);
2067 extern void rt_mutex_setprio(struct task_struct *p, int prio);
2068 extern void rt_mutex_adjust_pi(struct task_struct *p);
2069 #else
2070 static inline int rt_mutex_getprio(struct task_struct *p)
2071 {
2072 	return p->normal_prio;
2073 }
2074 # define rt_mutex_adjust_pi(p)		do { } while (0)
2075 #endif
2076 
2077 extern bool yield_to(struct task_struct *p, bool preempt);
2078 extern void set_user_nice(struct task_struct *p, long nice);
2079 extern int task_prio(const struct task_struct *p);
2080 extern int task_nice(const struct task_struct *p);
2081 extern int can_nice(const struct task_struct *p, const int nice);
2082 extern int task_curr(const struct task_struct *p);
2083 extern int idle_cpu(int cpu);
2084 extern int sched_setscheduler(struct task_struct *, int,
2085 			      const struct sched_param *);
2086 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2087 				      const struct sched_param *);
2088 extern struct task_struct *idle_task(int cpu);
2089 /**
2090  * is_idle_task - is the specified task an idle task?
2091  * @p: the task in question.
2092  */
2093 static inline bool is_idle_task(struct task_struct *p)
2094 {
2095 	return p->pid == 0;
2096 }
2097 extern struct task_struct *curr_task(int cpu);
2098 extern void set_curr_task(int cpu, struct task_struct *p);
2099 
2100 void yield(void);
2101 
2102 /*
2103  * The default (Linux) execution domain.
2104  */
2105 extern struct exec_domain	default_exec_domain;
2106 
2107 union thread_union {
2108 	struct thread_info thread_info;
2109 	unsigned long stack[THREAD_SIZE/sizeof(long)];
2110 };
2111 
2112 #ifndef __HAVE_ARCH_KSTACK_END
2113 static inline int kstack_end(void *addr)
2114 {
2115 	/* Reliable end of stack detection:
2116 	 * Some APM BIOS versions misalign the stack.
2117 	 */
2118 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2119 }
2120 #endif
2121 
2122 extern union thread_union init_thread_union;
2123 extern struct task_struct init_task;
2124 
2125 extern struct   mm_struct init_mm;
2126 
2127 extern struct pid_namespace init_pid_ns;
2128 
2129 /*
2130  * find a task by one of its numerical ids
2131  *
2132  * find_task_by_pid_ns():
2133  *      finds a task by its pid in the specified namespace
2134  * find_task_by_vpid():
2135  *      finds a task by its virtual pid
2136  *
2137  * see also find_vpid() etc in include/linux/pid.h
2138  */
2139 
2140 extern struct task_struct *find_task_by_vpid(pid_t nr);
2141 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2142 		struct pid_namespace *ns);
2143 
2144 extern void __set_special_pids(struct pid *pid);
2145 
2146 /* per-UID process charging. */
2147 extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
2148 static inline struct user_struct *get_uid(struct user_struct *u)
2149 {
2150 	atomic_inc(&u->__count);
2151 	return u;
2152 }
2153 extern void free_uid(struct user_struct *);
2154 extern void release_uids(struct user_namespace *ns);
2155 
2156 #include <asm/current.h>
2157 
2158 extern void xtime_update(unsigned long ticks);
2159 
2160 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2161 extern int wake_up_process(struct task_struct *tsk);
2162 extern void wake_up_new_task(struct task_struct *tsk);
2163 #ifdef CONFIG_SMP
2164  extern void kick_process(struct task_struct *tsk);
2165 #else
2166  static inline void kick_process(struct task_struct *tsk) { }
2167 #endif
2168 extern void sched_fork(struct task_struct *p);
2169 extern void sched_dead(struct task_struct *p);
2170 
2171 extern void proc_caches_init(void);
2172 extern void flush_signals(struct task_struct *);
2173 extern void __flush_signals(struct task_struct *);
2174 extern void ignore_signals(struct task_struct *);
2175 extern void flush_signal_handlers(struct task_struct *, int force_default);
2176 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2177 
2178 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2179 {
2180 	unsigned long flags;
2181 	int ret;
2182 
2183 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2184 	ret = dequeue_signal(tsk, mask, info);
2185 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2186 
2187 	return ret;
2188 }
2189 
2190 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2191 			      sigset_t *mask);
2192 extern void unblock_all_signals(void);
2193 extern void release_task(struct task_struct * p);
2194 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2195 extern int force_sigsegv(int, struct task_struct *);
2196 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2197 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2198 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2199 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2200 				const struct cred *, u32);
2201 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2202 extern int kill_pid(struct pid *pid, int sig, int priv);
2203 extern int kill_proc_info(int, struct siginfo *, pid_t);
2204 extern __must_check bool do_notify_parent(struct task_struct *, int);
2205 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2206 extern void force_sig(int, struct task_struct *);
2207 extern int send_sig(int, struct task_struct *, int);
2208 extern int zap_other_threads(struct task_struct *p);
2209 extern struct sigqueue *sigqueue_alloc(void);
2210 extern void sigqueue_free(struct sigqueue *);
2211 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2212 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2213 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2214 
2215 static inline int kill_cad_pid(int sig, int priv)
2216 {
2217 	return kill_pid(cad_pid, sig, priv);
2218 }
2219 
2220 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2221 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2222 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2223 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2224 
2225 /*
2226  * True if we are on the alternate signal stack.
2227  */
2228 static inline int on_sig_stack(unsigned long sp)
2229 {
2230 #ifdef CONFIG_STACK_GROWSUP
2231 	return sp >= current->sas_ss_sp &&
2232 		sp - current->sas_ss_sp < current->sas_ss_size;
2233 #else
2234 	return sp > current->sas_ss_sp &&
2235 		sp - current->sas_ss_sp <= current->sas_ss_size;
2236 #endif
2237 }
2238 
2239 static inline int sas_ss_flags(unsigned long sp)
2240 {
2241 	return (current->sas_ss_size == 0 ? SS_DISABLE
2242 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
2243 }
2244 
2245 /*
2246  * Routines for handling mm_structs
2247  */
2248 extern struct mm_struct * mm_alloc(void);
2249 
2250 /* mmdrop drops the mm and the page tables */
2251 extern void __mmdrop(struct mm_struct *);
2252 static inline void mmdrop(struct mm_struct * mm)
2253 {
2254 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2255 		__mmdrop(mm);
2256 }
2257 
2258 /* mmput gets rid of the mappings and all user-space */
2259 extern void mmput(struct mm_struct *);
2260 /* Grab a reference to a task's mm, if it is not already going away */
2261 extern struct mm_struct *get_task_mm(struct task_struct *task);
2262 /* Remove the current task's stale references to the old mm_struct */
2263 extern void mm_release(struct task_struct *, struct mm_struct *);
2264 /* Allocate a new mm structure and copy contents from tsk->mm */
2265 extern struct mm_struct *dup_mm(struct task_struct *tsk);
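
/*
 * Example (editorial sketch): the usual pairing when examining another
 * task's address space - get_task_mm() takes a reference (or returns
 * NULL for kernel threads and exiting tasks), and mmput() drops it:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		inspect(mm);
 *		mmput(mm);
 *	}
 *
 * (inspect() is a hypothetical consumer.)
 */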
2266 
2267 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2268 			struct task_struct *, struct pt_regs *);
2269 extern void flush_thread(void);
2270 extern void exit_thread(void);
2271 
2272 extern void exit_files(struct task_struct *);
2273 extern void __cleanup_sighand(struct sighand_struct *);
2274 
2275 extern void exit_itimers(struct signal_struct *);
2276 extern void flush_itimer_signals(void);
2277 
2278 extern void do_group_exit(int);
2279 
2280 extern void daemonize(const char *, ...);
2281 extern int allow_signal(int);
2282 extern int disallow_signal(int);
2283 
2284 extern int do_execve(const char *,
2285 		     const char __user * const __user *,
2286 		     const char __user * const __user *, struct pt_regs *);
2287 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
2288 struct task_struct *fork_idle(int);
2289 
2290 extern void set_task_comm(struct task_struct *tsk, char *from);
2291 extern char *get_task_comm(char *to, struct task_struct *tsk);
2292 
2293 #ifdef CONFIG_SMP
2294 void scheduler_ipi(void);
2295 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2296 #else
2297 static inline void scheduler_ipi(void) { }
2298 static inline unsigned long wait_task_inactive(struct task_struct *p,
2299 					       long match_state)
2300 {
2301 	return 1;
2302 }
2303 #endif
2304 
2305 #define next_task(p) \
2306 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2307 
2308 #define for_each_process(p) \
2309 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2310 
2311 extern bool current_is_single_threaded(void);
2312 
2313 /*
2314  * Careful: do_each_thread/while_each_thread is a double loop so
2315  *          'break' will not work as expected - use goto instead.
2316  */
2317 #define do_each_thread(g, t) \
2318 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2319 
2320 #define while_each_thread(g, t) \
2321 	while ((t = next_thread(t)) != g)
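
/*
 * Example (editorial sketch): walking every thread of every process
 * under tasklist_lock, with struct task_struct *g, *t; note the goto
 * rather than break to leave the double loop, per the warning above:
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		if (t == target)
 *			goto out;
 *	} while_each_thread(g, t);
 * out:
 *	read_unlock(&tasklist_lock);
 *
 * (target is a hypothetical task pointer being searched for.)
 */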
2322 
2323 static inline int get_nr_threads(struct task_struct *tsk)
2324 {
2325 	return tsk->signal->nr_threads;
2326 }
2327 
2328 static inline bool thread_group_leader(struct task_struct *p)
2329 {
2330 	return p->exit_signal >= 0;
2331 }
2332 
2333 /* Due to the insanities of de_thread() it is possible for a process
2334  * to have the pid of the thread group leader without actually being
2335  * the thread group leader.  For iteration through the pids in proc,
2336  * all we care about is that we have a task with the appropriate
2337  * pid; we don't actually care if we have the right task.
2338  */
2339 static inline int has_group_leader_pid(struct task_struct *p)
2340 {
2341 	return p->pid == p->tgid;
2342 }
2343 
2344 static inline
2345 int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2346 {
2347 	return p1->tgid == p2->tgid;
2348 }
2349 
2350 static inline struct task_struct *next_thread(const struct task_struct *p)
2351 {
2352 	return list_entry_rcu(p->thread_group.next,
2353 			      struct task_struct, thread_group);
2354 }
2355 
2356 static inline int thread_group_empty(struct task_struct *p)
2357 {
2358 	return list_empty(&p->thread_group);
2359 }
2360 
2361 #define delay_group_leader(p) \
2362 		(thread_group_leader(p) && !thread_group_empty(p))
2363 
2364 /*
2365  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2366  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2367  * pins the final release of task.io_context.  Also protects ->cpuset and
2368  * ->cgroup.subsys[].
2369  *
2370  * Nests both inside and outside of read_lock(&tasklist_lock).
2371  * It must not be nested with write_lock_irq(&tasklist_lock),
2372  * neither inside nor outside.
2373  */
2374 static inline void task_lock(struct task_struct *p)
2375 {
2376 	spin_lock(&p->alloc_lock);
2377 }
2378 
2379 static inline void task_unlock(struct task_struct *p)
2380 {
2381 	spin_unlock(&p->alloc_lock);
2382 }
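
/*
 * Example (editorial sketch): task_lock() guarding a read of ->comm,
 * one of the fields listed in the comment above; get_task_comm(),
 * declared earlier in this header, wraps this same pattern:
 *
 *	char buf[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strncpy(buf, p->comm, TASK_COMM_LEN);
 *	task_unlock(p);
 */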
2383 
2384 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2385 							unsigned long *flags);
2386 
2387 #define lock_task_sighand(tsk, flags)					\
2388 ({	struct sighand_struct *__ss;					\
2389 	__cond_lock(&(tsk)->sighand->siglock,				\
2390 		    (__ss = __lock_task_sighand(tsk, flags)));		\
2391 	__ss;								\
2392 })
2393 
2394 static inline void unlock_task_sighand(struct task_struct *tsk,
2395 						unsigned long *flags)
2396 {
2397 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2398 }
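
/*
 * Example (editorial sketch): lock_task_sighand() returns NULL if the
 * task no longer has a sighand (it is being released), so the unlock
 * must be conditional:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		inspect_signals(p);
 *		unlock_task_sighand(p, &flags);
 *	}
 *
 * (inspect_signals() is hypothetical.)
 */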
2399 
2400 #ifdef CONFIG_CGROUPS
2401 static inline void threadgroup_change_begin(struct task_struct *tsk)
2402 {
2403 	down_read(&tsk->signal->group_rwsem);
2404 }
2405 static inline void threadgroup_change_end(struct task_struct *tsk)
2406 {
2407 	up_read(&tsk->signal->group_rwsem);
2408 }
2409 
2410 /**
2411  * threadgroup_lock - lock threadgroup
2412  * @tsk: member task of the threadgroup to lock
2413  *
2414  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
2415  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2416  * perform exec.  This is useful for cases where the threadgroup needs to
2417  * stay stable across blockable operations.
2418  *
2419  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2420  * synchronization.  While held, no new task will be added to threadgroup
2421  * and no existing live task will have its PF_EXITING set.
2422  *
2423  * During exec, a task puts its thread group through unusual changes.
2424  * After de-threading, exclusive access is assumed to resources which
2425  * are usually shared by tasks in the same group - e.g. sighand may be
2426  * replaced with a new one.  Also, the exec'ing task takes over the group
2427  * leader role, including its pid.  Exclude these changes while locked by
2428  * grabbing cred_guard_mutex, which is used to synchronize the exec path.
2429  */
2430 static inline void threadgroup_lock(struct task_struct *tsk)
2431 {
2432 	/*
2433 	 * exec uses exit for de-threading, nesting group_rwsem inside
2434 	 * cred_guard_mutex. Grab cred_guard_mutex first.
2435 	 */
2436 	mutex_lock(&tsk->signal->cred_guard_mutex);
2437 	down_write(&tsk->signal->group_rwsem);
2438 }
2439 
2440 /**
2441  * threadgroup_unlock - unlock threadgroup
2442  * @tsk: member task of the threadgroup to unlock
2443  *
2444  * Reverse threadgroup_lock().
2445  */
2446 static inline void threadgroup_unlock(struct task_struct *tsk)
2447 {
2448 	up_write(&tsk->signal->group_rwsem);
2449 	mutex_unlock(&tsk->signal->cred_guard_mutex);
2450 }
2451 #else
2452 static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2453 static inline void threadgroup_change_end(struct task_struct *tsk) {}
2454 static inline void threadgroup_lock(struct task_struct *tsk) {}
2455 static inline void threadgroup_unlock(struct task_struct *tsk) {}
2456 #endif
2457 
2458 #ifndef __HAVE_THREAD_FUNCTIONS
2459 
2460 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2461 #define task_stack_page(task)	((task)->stack)
2462 
2463 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2464 {
2465 	*task_thread_info(p) = *task_thread_info(org);
2466 	task_thread_info(p)->task = p;
2467 }
2468 
2469 static inline unsigned long *end_of_stack(struct task_struct *p)
2470 {
2471 	return (unsigned long *)(task_thread_info(p) + 1);
2472 }
2473 
2474 #endif
2475 
2476 static inline int object_is_on_stack(void *obj)
2477 {
2478 	void *stack = task_stack_page(current);
2479 
2480 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2481 }
2482 
2483 extern void thread_info_cache_init(void);
2484 
2485 #ifdef CONFIG_DEBUG_STACK_USAGE
2486 static inline unsigned long stack_not_used(struct task_struct *p)
2487 {
2488 	unsigned long *n = end_of_stack(p);
2489 
2490 	do { 	/* Skip over canary */
2491 		n++;
2492 	} while (!*n);
2493 
2494 	return (unsigned long)n - (unsigned long)end_of_stack(p);
2495 }
2496 #endif
2497 
2498 /* set thread flags in other task's structures
2499  * - see asm/thread_info.h for TIF_xxxx flags available
2500  */
2501 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2502 {
2503 	set_ti_thread_flag(task_thread_info(tsk), flag);
2504 }
2505 
2506 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2507 {
2508 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2509 }
2510 
2511 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2512 {
2513 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2514 }
2515 
2516 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2517 {
2518 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2519 }
2520 
2521 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2522 {
2523 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2524 }
2525 
2526 static inline void set_tsk_need_resched(struct task_struct *tsk)
2527 {
2528 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2529 }
2530 
2531 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2532 {
2533 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2534 }
2535 
2536 static inline int test_tsk_need_resched(struct task_struct *tsk)
2537 {
2538 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2539 }
2540 
2541 static inline int restart_syscall(void)
2542 {
2543 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2544 	return -ERESTARTNOINTR;
2545 }
2546 
2547 static inline int signal_pending(struct task_struct *p)
2548 {
2549 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2550 }
2551 
2552 static inline int __fatal_signal_pending(struct task_struct *p)
2553 {
2554 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2555 }
2556 
2557 static inline int fatal_signal_pending(struct task_struct *p)
2558 {
2559 	return signal_pending(p) && __fatal_signal_pending(p);
2560 }
2561 
2562 static inline int signal_pending_state(long state, struct task_struct *p)
2563 {
2564 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2565 		return 0;
2566 	if (!signal_pending(p))
2567 		return 0;
2568 
2569 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2570 }
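
/*
 * Example (editorial sketch): an interruptible wait loop that bails out
 * when a signal is pending; cond() stands in for the real wake-up
 * condition:
 *
 *	while (!cond()) {
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule_timeout_interruptible(HZ);
 *	}
 */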
2571 
2572 static inline int need_resched(void)
2573 {
2574 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2575 }
2576 
2577 /*
2578  * cond_resched() and cond_resched_lock(): latency reduction via
2579  * explicit rescheduling in places that are safe. The return
2580  * value indicates whether a reschedule was in fact done.
2581  * cond_resched_lock() will drop the spinlock before scheduling,
2582  * cond_resched_softirq() will enable BHs before scheduling.
2583  */
2584 extern int _cond_resched(void);
2585 
2586 #define cond_resched() ({			\
2587 	__might_sleep(__FILE__, __LINE__, 0);	\
2588 	_cond_resched();			\
2589 })
2590 
2591 extern int __cond_resched_lock(spinlock_t *lock);
2592 
2593 #ifdef CONFIG_PREEMPT_COUNT
2594 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2595 #else
2596 #define PREEMPT_LOCK_OFFSET	0
2597 #endif
2598 
2599 #define cond_resched_lock(lock) ({				\
2600 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2601 	__cond_resched_lock(lock);				\
2602 })
2603 
2604 extern int __cond_resched_softirq(void);
2605 
2606 #define cond_resched_softirq() ({					\
2607 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2608 	__cond_resched_softirq();					\
2609 })
2610 
2611 /*
2612  * Does a critical section need to be broken due to another
2613  * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
2614  * but it expresses a general need for low latency.)
2615  */
2616 static inline int spin_needbreak(spinlock_t *lock)
2617 {
2618 #ifdef CONFIG_PREEMPT
2619 	return spin_is_contended(lock);
2620 #else
2621 	return 0;
2622 #endif
2623 }
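
/*
 * Example (editorial sketch): combining spin_needbreak() with
 * cond_resched_lock() to briefly release a contended lock during a long
 * scan; more_work() and do_one_item() are hypothetical:
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_one_item();
 *		if (need_resched() || spin_needbreak(&lock))
 *			cond_resched_lock(&lock);
 *	}
 *	spin_unlock(&lock);
 */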
2624 
2625 /*
2626  * Thread group CPU time accounting.
2627  */
2628 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2629 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2630 
2631 static inline void thread_group_cputime_init(struct signal_struct *sig)
2632 {
2633 	raw_spin_lock_init(&sig->cputimer.lock);
2634 }
2635 
2636 /*
2637  * Reevaluate whether the task has signals pending delivery.
2638  * Wake the task if so.
2639  * This is required every time the blocked sigset_t changes.
2640  * Callers must hold sighand->siglock.
2641  */
2642 extern void recalc_sigpending_and_wake(struct task_struct *t);
2643 extern void recalc_sigpending(void);
2644 
2645 extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2646 
2647 /*
2648  * Wrappers for p->thread_info->cpu access. No-op on UP.
2649  */
2650 #ifdef CONFIG_SMP
2651 
2652 static inline unsigned int task_cpu(const struct task_struct *p)
2653 {
2654 	return task_thread_info(p)->cpu;
2655 }
2656 
2657 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2658 
2659 #else
2660 
2661 static inline unsigned int task_cpu(const struct task_struct *p)
2662 {
2663 	return 0;
2664 }
2665 
2666 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2667 {
2668 }
2669 
2670 #endif /* CONFIG_SMP */
2671 
2672 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2673 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2674 
2675 extern void normalize_rt_tasks(void);
2676 
2677 #ifdef CONFIG_CGROUP_SCHED
2678 
2679 extern struct task_group root_task_group;
2680 
2681 extern struct task_group *sched_create_group(struct task_group *parent);
2682 extern void sched_destroy_group(struct task_group *tg);
2683 extern void sched_move_task(struct task_struct *tsk);
2684 #ifdef CONFIG_FAIR_GROUP_SCHED
2685 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2686 extern unsigned long sched_group_shares(struct task_group *tg);
2687 #endif
2688 #ifdef CONFIG_RT_GROUP_SCHED
2689 extern int sched_group_set_rt_runtime(struct task_group *tg,
2690 				      long rt_runtime_us);
2691 extern long sched_group_rt_runtime(struct task_group *tg);
2692 extern int sched_group_set_rt_period(struct task_group *tg,
2693 				      long rt_period_us);
2694 extern long sched_group_rt_period(struct task_group *tg);
2695 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2696 #endif
2697 #endif
2698 
2699 extern int task_can_switch_user(struct user_struct *up,
2700 					struct task_struct *tsk);
2701 
2702 #ifdef CONFIG_TASK_XACCT
2703 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2704 {
2705 	tsk->ioac.rchar += amt;
2706 }
2707 
2708 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2709 {
2710 	tsk->ioac.wchar += amt;
2711 }
2712 
2713 static inline void inc_syscr(struct task_struct *tsk)
2714 {
2715 	tsk->ioac.syscr++;
2716 }
2717 
2718 static inline void inc_syscw(struct task_struct *tsk)
2719 {
2720 	tsk->ioac.syscw++;
2721 }
2722 #else
2723 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2724 {
2725 }
2726 
2727 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2728 {
2729 }
2730 
2731 static inline void inc_syscr(struct task_struct *tsk)
2732 {
2733 }
2734 
2735 static inline void inc_syscw(struct task_struct *tsk)
2736 {
2737 }
2738 #endif
2739 
2740 #ifndef TASK_SIZE_OF
2741 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2742 #endif
2743 
2744 #ifdef CONFIG_MM_OWNER
2745 extern void mm_update_next_owner(struct mm_struct *mm);
2746 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2747 #else
2748 static inline void mm_update_next_owner(struct mm_struct *mm)
2749 {
2750 }
2751 
2752 static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2753 {
2754 }
2755 #endif /* CONFIG_MM_OWNER */
2756 
2757 static inline unsigned long task_rlimit(const struct task_struct *tsk,
2758 		unsigned int limit)
2759 {
2760 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2761 }
2762 
2763 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2764 		unsigned int limit)
2765 {
2766 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2767 }
2768 
2769 static inline unsigned long rlimit(unsigned int limit)
2770 {
2771 	return task_rlimit(current, limit);
2772 }
2773 
2774 static inline unsigned long rlimit_max(unsigned int limit)
2775 {
2776 	return task_rlimit_max(current, limit);
2777 }
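
/*
 * Example (editorial sketch): reading the current task's soft limit on
 * open files via the rlimit() shorthand; nr_wanted is hypothetical:
 *
 *	if (nr_wanted > rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */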
2778 
2779 #endif /* __KERNEL__ */
2780 
2781 #endif
2782