xref: /linux/include/linux/sched.h (revision d91517839e5d95adc0cf4b28caa7af62a71de526)
1 #ifndef _LINUX_SCHED_H
2 #define _LINUX_SCHED_H
3 
4 #include <uapi/linux/sched.h>
5 
6 
7 struct sched_param {
8 	int sched_priority;
9 };
10 
11 #include <asm/param.h>	/* for HZ */
12 
13 #include <linux/capability.h>
14 #include <linux/threads.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/timex.h>
18 #include <linux/jiffies.h>
19 #include <linux/plist.h>
20 #include <linux/rbtree.h>
21 #include <linux/thread_info.h>
22 #include <linux/cpumask.h>
23 #include <linux/errno.h>
24 #include <linux/nodemask.h>
25 #include <linux/mm_types.h>
26 #include <linux/preempt_mask.h>
27 
28 #include <asm/page.h>
29 #include <asm/ptrace.h>
30 #include <asm/cputime.h>
31 
32 #include <linux/smp.h>
33 #include <linux/sem.h>
34 #include <linux/signal.h>
35 #include <linux/compiler.h>
36 #include <linux/completion.h>
37 #include <linux/pid.h>
38 #include <linux/percpu.h>
39 #include <linux/topology.h>
40 #include <linux/proportions.h>
41 #include <linux/seccomp.h>
42 #include <linux/rcupdate.h>
43 #include <linux/rculist.h>
44 #include <linux/rtmutex.h>
45 
46 #include <linux/time.h>
47 #include <linux/param.h>
48 #include <linux/resource.h>
49 #include <linux/timer.h>
50 #include <linux/hrtimer.h>
51 #include <linux/task_io_accounting.h>
52 #include <linux/latencytop.h>
53 #include <linux/cred.h>
54 #include <linux/llist.h>
55 #include <linux/uidgid.h>
56 #include <linux/gfp.h>
57 
58 #include <asm/processor.h>
59 
60 #define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
61 
62 /*
63  * Extended scheduling parameters data structure.
64  *
65  * This is needed because the original struct sched_param can not be
66  * altered without introducing ABI issues with legacy applications
67  * (e.g., in sched_getparam()).
68  *
69  * However, the possibility of specifying more than just a priority for
70  * the tasks may be useful for a wide variety of application fields, e.g.,
71  * multimedia, streaming, automation and control, and many others.
72  *
73  * This variant (sched_attr) is meant to describe a so-called
74  * sporadic time-constrained task. In such a model a task is specified by:
75  *  - the activation period or minimum instance inter-arrival time;
76  *  - the maximum (or average, depending on the actual scheduling
77  *    discipline) computation time of all instances, a.k.a. runtime;
78  *  - the deadline (relative to the actual activation time) of each
79  *    instance.
80  * Very briefly, a periodic (sporadic) task asks for the execution of
81  * some specific computation --which is typically called an instance--
82  * (at most) every period. Moreover, each instance typically lasts no more
83  * than the runtime and must be completed by time instant t equal to
84  * the instance activation time + the deadline.
85  *
86  * This is reflected by the actual fields of the sched_attr structure:
87  *
88  *  @size		size of the structure, for fwd/bwd compat.
89  *
90  *  @sched_policy	task's scheduling policy
91  *  @sched_flags	for customizing the scheduler behaviour
92  *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
93  *  @sched_priority	task's static priority (SCHED_FIFO/RR)
94  *  @sched_deadline	representative of the task's deadline
95  *  @sched_runtime	representative of the task's runtime
96  *  @sched_period	representative of the task's period
97  *
98  * Given this task model, there is a multiplicity of scheduling algorithms
99  * and policies that can be used to ensure all the tasks will meet their
100  * timing constraints.
101  *
102  * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
103  * only user of this new interface. More information about the algorithm
104  * is available in the scheduling class file or in Documentation/.
105  */
106 struct sched_attr {
107 	u32 size;
108 
109 	u32 sched_policy;
110 	u64 sched_flags;
111 
112 	/* SCHED_NORMAL, SCHED_BATCH */
113 	s32 sched_nice;
114 
115 	/* SCHED_FIFO, SCHED_RR */
116 	u32 sched_priority;
117 
118 	/* SCHED_DEADLINE */
119 	u64 sched_runtime;
120 	u64 sched_deadline;
121 	u64 sched_period;
122 };
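/*
 * Illustrative sketch (not part of this header): how user space might fill
 * in a sched_attr to request SCHED_DEADLINE. There is no glibc wrapper for
 * sched_setattr() here, so the raw syscall (via <sys/syscall.h>) is assumed;
 * the 10ms/100ms values are examples only. All times are in nanoseconds.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */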
123 
124 struct exec_domain;
125 struct futex_pi_state;
126 struct robust_list_head;
127 struct bio_list;
128 struct fs_struct;
129 struct perf_event_context;
130 struct blk_plug;
131 
132 /*
133  * List of flags we want to share for kernel threads,
134  * if only because they are not used by them anyway.
135  */
136 #define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
137 
138 /*
139  * These are the constants used to fake the fixed-point load-average
140  * counting. Some notes:
141  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
142  *    a load-average precision of 10 bits integer + 11 bits fractional
143  *  - if you want to count load-averages more often, you need more
144  *    precision, or rounding will get you. With 2-second counting freq,
145  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
146  *    11 bit fractions.
147  */
148 extern unsigned long avenrun[];		/* Load averages */
149 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
150 
151 #define FSHIFT		11		/* nr of bits of precision */
152 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
153 #define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
154 #define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
155 #define EXP_5		2014		/* 1/exp(5sec/5min) */
156 #define EXP_15		2037		/* 1/exp(5sec/15min) */
157 
158 #define CALC_LOAD(load,exp,n) \
159 	load *= exp; \
160 	load += n*(FIXED_1-exp); \
161 	load >>= FSHIFT;
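/*
 * Worked example (illustrative): with FSHIFT = 11, FIXED_1 = 2048 and the
 * 5 second sampling interval, EXP_1 = FIXED_1 / e^(5/60) ~= 2048 * 0.920
 * ~= 1884, which is where the constant above comes from (likewise for
 * EXP_5 and EXP_15). One CALC_LOAD step with a current load of 1.00
 * (2048 in fixed point) and n = 3 runnable tasks (n must already be
 * scaled by FIXED_1, i.e. 6144) gives:
 *
 *	load = (2048 * 1884 + 6144 * (2048 - 1884)) >> 11
 *	     = (3858432 + 1007616) >> 11 = 2376		(~1.16 after decay)
 */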
162 
163 extern unsigned long total_forks;
164 extern int nr_threads;
165 DECLARE_PER_CPU(unsigned long, process_counts);
166 extern int nr_processes(void);
167 extern unsigned long nr_running(void);
168 extern unsigned long nr_iowait(void);
169 extern unsigned long nr_iowait_cpu(int cpu);
170 extern unsigned long this_cpu_load(void);
171 
172 
173 extern void calc_global_load(unsigned long ticks);
174 extern void update_cpu_load_nohz(void);
175 
176 extern unsigned long get_parent_ip(unsigned long addr);
177 
178 extern void dump_cpu_task(int cpu);
179 
180 struct seq_file;
181 struct cfs_rq;
182 struct task_group;
183 #ifdef CONFIG_SCHED_DEBUG
184 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
185 extern void proc_sched_set_task(struct task_struct *p);
186 extern void
187 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
188 #endif
189 
190 /*
191  * Task state bitmask. NOTE! These bits are also
192  * encoded in fs/proc/array.c: get_task_state().
193  *
194  * We have two separate sets of flags: task->state
195  * is about runnability, while task->exit_state are
196  * about the task exiting. Confusing, but this way
197  * modifying one set can't modify the other one by
198  * mistake.
199  */
200 #define TASK_RUNNING		0
201 #define TASK_INTERRUPTIBLE	1
202 #define TASK_UNINTERRUPTIBLE	2
203 #define __TASK_STOPPED		4
204 #define __TASK_TRACED		8
205 /* in tsk->exit_state */
206 #define EXIT_ZOMBIE		16
207 #define EXIT_DEAD		32
208 /* in tsk->state again */
209 #define TASK_DEAD		64
210 #define TASK_WAKEKILL		128
211 #define TASK_WAKING		256
212 #define TASK_PARKED		512
213 #define TASK_STATE_MAX		1024
214 
215 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
216 
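/*
 * Build-time check: TASK_STATE_TO_CHAR_STR must provide exactly one
 * character per state bit above (11 for TASK_STATE_MAX == 1024), otherwise
 * the dummy array below gets size -1 and the build fails.
 */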
217 extern char ___assert_task_state[1 - 2*!!(
218 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
219 
220 /* Convenience macros for the sake of set_task_state */
221 #define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
222 #define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
223 #define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
224 
225 /* Convenience macros for the sake of wake_up */
226 #define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
227 #define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
228 
229 /* get_task_state() */
230 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
231 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
232 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
233 
234 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
235 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
236 #define task_is_stopped_or_traced(task)	\
237 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
238 #define task_contributes_to_load(task)	\
239 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
240 				 (task->flags & PF_FROZEN) == 0)
241 
242 #define __set_task_state(tsk, state_value)		\
243 	do { (tsk)->state = (state_value); } while (0)
244 #define set_task_state(tsk, state_value)		\
245 	set_mb((tsk)->state, (state_value))
246 
247 /*
248  * set_current_state() includes a barrier so that the write of current->state
249  * is correctly serialised wrt the caller's subsequent test of whether to
250  * actually sleep:
251  *
252  *	set_current_state(TASK_UNINTERRUPTIBLE);
253  *	if (do_i_need_to_sleep())
254  *		schedule();
255  *
256  * If the caller does not need such serialisation then use __set_current_state()
257  */
258 #define __set_current_state(state_value)			\
259 	do { current->state = (state_value); } while (0)
260 #define set_current_state(state_value)		\
261 	set_mb(current->state, (state_value))
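/*
 * A common usage sketch (illustrative only) of the above: the waiter
 * re-checks its condition after setting the state, so a wakeup that races
 * with the check is not lost, and the waker updates the condition before
 * calling wake_up_process():
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * where "condition" stands for whatever event the caller waits on.
 */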
262 
263 /* Task command name length */
264 #define TASK_COMM_LEN 16
265 
266 #include <linux/spinlock.h>
267 
268 /*
269  * This serializes "schedule()" and also protects
270  * the run-queue from deletions/modifications (but
271  * _adding_ to the beginning of the run-queue has
272  * a separate lock).
273  */
274 extern rwlock_t tasklist_lock;
275 extern spinlock_t mmlist_lock;
276 
277 struct task_struct;
278 
279 #ifdef CONFIG_PROVE_RCU
280 extern int lockdep_tasklist_lock_is_held(void);
281 #endif /* #ifdef CONFIG_PROVE_RCU */
282 
283 extern void sched_init(void);
284 extern void sched_init_smp(void);
285 extern asmlinkage void schedule_tail(struct task_struct *prev);
286 extern void init_idle(struct task_struct *idle, int cpu);
287 extern void init_idle_bootup_task(struct task_struct *idle);
288 
289 extern int runqueue_is_locked(int cpu);
290 
291 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
292 extern void nohz_balance_enter_idle(int cpu);
293 extern void set_cpu_sd_state_idle(void);
294 extern int get_nohz_timer_target(void);
295 #else
296 static inline void nohz_balance_enter_idle(int cpu) { }
297 static inline void set_cpu_sd_state_idle(void) { }
298 #endif
299 
300 /*
301  * Only dump TASK_* tasks. (0 for all tasks)
302  */
303 extern void show_state_filter(unsigned long state_filter);
304 
305 static inline void show_state(void)
306 {
307 	show_state_filter(0);
308 }
309 
310 extern void show_regs(struct pt_regs *);
311 
312 /*
313  * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
314  * task), SP is the stack pointer of the first frame that should be shown in the back
315  * trace (or NULL if the entire call-chain of the task should be shown).
316  */
317 extern void show_stack(struct task_struct *task, unsigned long *sp);
318 
319 void io_schedule(void);
320 long io_schedule_timeout(long timeout);
321 
322 extern void cpu_init (void);
323 extern void trap_init(void);
324 extern void update_process_times(int user);
325 extern void scheduler_tick(void);
326 
327 extern void sched_show_task(struct task_struct *p);
328 
329 #ifdef CONFIG_LOCKUP_DETECTOR
330 extern void touch_softlockup_watchdog(void);
331 extern void touch_softlockup_watchdog_sync(void);
332 extern void touch_all_softlockup_watchdogs(void);
333 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
334 				  void __user *buffer,
335 				  size_t *lenp, loff_t *ppos);
336 extern unsigned int  softlockup_panic;
337 void lockup_detector_init(void);
338 #else
339 static inline void touch_softlockup_watchdog(void)
340 {
341 }
342 static inline void touch_softlockup_watchdog_sync(void)
343 {
344 }
345 static inline void touch_all_softlockup_watchdogs(void)
346 {
347 }
348 static inline void lockup_detector_init(void)
349 {
350 }
351 #endif
352 
353 #ifdef CONFIG_DETECT_HUNG_TASK
354 void reset_hung_task_detector(void);
355 #else
356 static inline void reset_hung_task_detector(void)
357 {
358 }
359 #endif
360 
361 /* Attach to any functions which should be ignored in wchan output. */
362 #define __sched		__attribute__((__section__(".sched.text")))
363 
364 /* Linker adds these: start and end of __sched functions */
365 extern char __sched_text_start[], __sched_text_end[];
366 
367 /* Is this address in the __sched functions? */
368 extern int in_sched_functions(unsigned long addr);
369 
370 #define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
371 extern signed long schedule_timeout(signed long timeout);
372 extern signed long schedule_timeout_interruptible(signed long timeout);
373 extern signed long schedule_timeout_killable(signed long timeout);
374 extern signed long schedule_timeout_uninterruptible(signed long timeout);
375 asmlinkage void schedule(void);
376 extern void schedule_preempt_disabled(void);
377 
378 struct nsproxy;
379 struct user_namespace;
380 
381 #ifdef CONFIG_MMU
382 extern void arch_pick_mmap_layout(struct mm_struct *mm);
383 extern unsigned long
384 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
385 		       unsigned long, unsigned long);
386 extern unsigned long
387 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
388 			  unsigned long len, unsigned long pgoff,
389 			  unsigned long flags);
390 #else
391 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
392 #endif
393 
394 #define SUID_DUMP_DISABLE	0	/* No setuid dumping */
395 #define SUID_DUMP_USER		1	/* Dump as user of process */
396 #define SUID_DUMP_ROOT		2	/* Dump as root */
397 
398 /* mm flags */
399 
400 /* for SUID_DUMP_* above */
401 #define MMF_DUMPABLE_BITS 2
402 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
403 
404 extern void set_dumpable(struct mm_struct *mm, int value);
405 /*
406  * This returns the actual value of the suid_dumpable flag. For things
407  * that use this to check for privilege transitions, it must
408  * test against SUID_DUMP_USER rather than treating it as a boolean
409  * value.
410  */
411 static inline int __get_dumpable(unsigned long mm_flags)
412 {
413 	return mm_flags & MMF_DUMPABLE_MASK;
414 }
415 
416 static inline int get_dumpable(struct mm_struct *mm)
417 {
418 	return __get_dumpable(mm->flags);
419 }
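/*
 * Illustrative sketch: code gating access on a privilege transition must
 * compare against SUID_DUMP_USER rather than treating the value as a
 * boolean, since SUID_DUMP_ROOT is also non-zero:
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		return -EACCES;		// hypothetical caller policy
 */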
420 
421 /* coredump filter bits */
422 #define MMF_DUMP_ANON_PRIVATE	2
423 #define MMF_DUMP_ANON_SHARED	3
424 #define MMF_DUMP_MAPPED_PRIVATE	4
425 #define MMF_DUMP_MAPPED_SHARED	5
426 #define MMF_DUMP_ELF_HEADERS	6
427 #define MMF_DUMP_HUGETLB_PRIVATE 7
428 #define MMF_DUMP_HUGETLB_SHARED  8
429 
430 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
431 #define MMF_DUMP_FILTER_BITS	7
432 #define MMF_DUMP_FILTER_MASK \
433 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
434 #define MMF_DUMP_FILTER_DEFAULT \
435 	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
436 	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
437 
438 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
439 # define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
440 #else
441 # define MMF_DUMP_MASK_DEFAULT_ELF	0
442 #endif
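/*
 * Worked example (assuming the /proc/<pid>/coredump_filter encoding, which
 * exposes these bits shifted down by MMF_DUMP_FILTER_SHIFT): the default
 * above enables MMF_DUMP_ANON_PRIVATE, MMF_DUMP_ANON_SHARED and
 * MMF_DUMP_HUGETLB_PRIVATE and reads back as 0x23; with
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS it also includes
 * MMF_DUMP_ELF_HEADERS and reads back as 0x33.
 */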
443 					/* leave room for more dump flags */
444 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
445 #define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
446 #define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
447 
448 #define MMF_HAS_UPROBES		19	/* has uprobes */
449 #define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
450 
451 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
452 
453 struct sighand_struct {
454 	atomic_t		count;
455 	struct k_sigaction	action[_NSIG];
456 	spinlock_t		siglock;
457 	wait_queue_head_t	signalfd_wqh;
458 };
459 
460 struct pacct_struct {
461 	int			ac_flag;
462 	long			ac_exitcode;
463 	unsigned long		ac_mem;
464 	cputime_t		ac_utime, ac_stime;
465 	unsigned long		ac_minflt, ac_majflt;
466 };
467 
468 struct cpu_itimer {
469 	cputime_t expires;
470 	cputime_t incr;
471 	u32 error;
472 	u32 incr_error;
473 };
474 
475 /**
476  * struct cputime - snapshot of system and user cputime
477  * @utime: time spent in user mode
478  * @stime: time spent in system mode
479  *
480  * Gathers a generic snapshot of user and system time.
481  */
482 struct cputime {
483 	cputime_t utime;
484 	cputime_t stime;
485 };
486 
487 /**
488  * struct task_cputime - collected CPU time counts
489  * @utime:		time spent in user mode, in &cputime_t units
490  * @stime:		time spent in kernel mode, in &cputime_t units
491  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
492  *
493  * This is an extension of struct cputime that includes the total runtime
494  * spent by the task from the scheduler point of view.
495  *
496  * As a result, this structure groups together three kinds of CPU time
497  * that are tracked for threads and thread groups.  Most things considering
498  * CPU time want to group these counts together and treat all three
499  * of them in parallel.
500  */
501 struct task_cputime {
502 	cputime_t utime;
503 	cputime_t stime;
504 	unsigned long long sum_exec_runtime;
505 };
506 /* Alternate field names when used to cache expirations. */
507 #define prof_exp	stime
508 #define virt_exp	utime
509 #define sched_exp	sum_exec_runtime
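/*
 * Descriptive note: when a task_cputime is used as a cputime_expires cache
 * (see the POSIX CPU timer code), prof_exp holds the earliest CPUCLOCK_PROF
 * expiration, virt_exp the earliest CPUCLOCK_VIRT expiration and sched_exp
 * the earliest CPUCLOCK_SCHED expiration.
 */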
510 
511 #define INIT_CPUTIME	\
512 	(struct task_cputime) {					\
513 		.utime = 0,					\
514 		.stime = 0,					\
515 		.sum_exec_runtime = 0,				\
516 	}
517 
518 #ifdef CONFIG_PREEMPT_COUNT
519 #define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
520 #else
521 #define PREEMPT_DISABLED	PREEMPT_ENABLED
522 #endif
523 
524 /*
525  * Disable preemption until the scheduler is running.
526  * Reset by start_kernel()->sched_init()->init_idle().
527  *
528  * We include PREEMPT_ACTIVE to prevent cond_resched() from working
529  * before the scheduler is active -- see should_resched().
530  */
531 #define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
532 
533 /**
534  * struct thread_group_cputimer - thread group interval timer counts
535  * @cputime:		thread group interval timers.
536  * @running:		non-zero when there are timers running and
537  * 			@cputime receives updates.
538  * @lock:		lock for fields in this struct.
539  *
540  * This structure contains the version of task_cputime, above, that is
541  * used for thread group CPU timer calculations.
542  */
543 struct thread_group_cputimer {
544 	struct task_cputime cputime;
545 	int running;
546 	raw_spinlock_t lock;
547 };
548 
549 #include <linux/rwsem.h>
550 struct autogroup;
551 
552 /*
553  * NOTE! "signal_struct" does not have its own
554  * locking, because a shared signal_struct always
555  * implies a shared sighand_struct, so locking
556  * sighand_struct is always a proper superset of
557  * the locking of signal_struct.
558  */
559 struct signal_struct {
560 	atomic_t		sigcnt;
561 	atomic_t		live;
562 	int			nr_threads;
563 	struct list_head	thread_head;
564 
565 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
566 
567 	/* current thread group signal load-balancing target: */
568 	struct task_struct	*curr_target;
569 
570 	/* shared signal handling: */
571 	struct sigpending	shared_pending;
572 
573 	/* thread group exit support */
574 	int			group_exit_code;
575 	/* overloaded:
576 	 * - notify group_exit_task when ->count is equal to notify_count
577 	 * - everyone except group_exit_task is stopped during signal delivery
578 	 *   of fatal signals, group_exit_task processes the signal.
579 	 */
580 	int			notify_count;
581 	struct task_struct	*group_exit_task;
582 
583 	/* thread group stop support, overloads group_exit_code too */
584 	int			group_stop_count;
585 	unsigned int		flags; /* see SIGNAL_* flags below */
586 
587 	/*
588 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
589 	 * manager, to re-parent orphan (double-forking) child processes
590 	 * to this process instead of 'init'. The service manager is
591 	 * able to receive SIGCHLD signals and is able to investigate
592 	 * the process until it calls wait(). All children of this
593 	 * process will inherit a flag if they should look for a
594 	 * child_subreaper process at exit.
595 	 */
596 	unsigned int		is_child_subreaper:1;
597 	unsigned int		has_child_subreaper:1;
598 
599 	/* POSIX.1b Interval Timers */
600 	int			posix_timer_id;
601 	struct list_head	posix_timers;
602 
603 	/* ITIMER_REAL timer for the process */
604 	struct hrtimer real_timer;
605 	struct pid *leader_pid;
606 	ktime_t it_real_incr;
607 
608 	/*
609 	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
610 	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
611 	 * values are defined to 0 and 1 respectively
612 	 */
613 	struct cpu_itimer it[2];
614 
615 	/*
616 	 * Thread group totals for process CPU timers.
617 	 * See thread_group_cputimer(), et al, for details.
618 	 */
619 	struct thread_group_cputimer cputimer;
620 
621 	/* Earliest-expiration cache. */
622 	struct task_cputime cputime_expires;
623 
624 	struct list_head cpu_timers[3];
625 
626 	struct pid *tty_old_pgrp;
627 
628 	/* boolean value for session group leader */
629 	int leader;
630 
631 	struct tty_struct *tty; /* NULL if no tty */
632 
633 #ifdef CONFIG_SCHED_AUTOGROUP
634 	struct autogroup *autogroup;
635 #endif
636 	/*
637 	 * Cumulative resource counters for dead threads in the group,
638 	 * and for reaped dead child processes forked by this group.
639 	 * Live threads maintain their own counters and add to these
640 	 * in __exit_signal, except for the group leader.
641 	 */
642 	cputime_t utime, stime, cutime, cstime;
643 	cputime_t gtime;
644 	cputime_t cgtime;
645 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
646 	struct cputime prev_cputime;
647 #endif
648 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
649 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
650 	unsigned long inblock, oublock, cinblock, coublock;
651 	unsigned long maxrss, cmaxrss;
652 	struct task_io_accounting ioac;
653 
654 	/*
655 	 * Cumulative ns of scheduled CPU time of dead threads in the
656 	 * group, not including a zombie group leader. (This only differs
657 	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
658 	 * other than jiffies.)
659 	 */
660 	unsigned long long sum_sched_runtime;
661 
662 	/*
663 	 * We don't bother to synchronize most readers of this at all,
664 	 * because there is no reader checking a limit that actually needs
665 	 * to get both rlim_cur and rlim_max atomically, and either one
666 	 * alone is a single word that can safely be read normally.
667 	 * getrlimit/setrlimit use task_lock(current->group_leader) to
668 	 * protect this instead of the siglock, because they really
669 	 * have no need to disable irqs.
670 	 */
671 	struct rlimit rlim[RLIM_NLIMITS];
672 
673 #ifdef CONFIG_BSD_PROCESS_ACCT
674 	struct pacct_struct pacct;	/* per-process accounting information */
675 #endif
676 #ifdef CONFIG_TASKSTATS
677 	struct taskstats *stats;
678 #endif
679 #ifdef CONFIG_AUDIT
680 	unsigned audit_tty;
681 	unsigned audit_tty_log_passwd;
682 	struct tty_audit_buf *tty_audit_buf;
683 #endif
684 #ifdef CONFIG_CGROUPS
685 	/*
686 	 * group_rwsem prevents new tasks from entering the threadgroup and
687 	 * member tasks from exiting, more specifically, setting of
688 	 * PF_EXITING.  fork and exit paths are protected with this rwsem
689 	 * using threadgroup_change_begin/end().  Users which require
690 	 * threadgroup to remain stable should use threadgroup_[un]lock()
691 	 * which also takes care of exec path.  Currently, cgroup is the
692 	 * only user.
693 	 */
694 	struct rw_semaphore group_rwsem;
695 #endif
696 
697 	oom_flags_t oom_flags;
698 	short oom_score_adj;		/* OOM kill score adjustment */
699 	short oom_score_adj_min;	/* OOM kill score adjustment min value.
700 					 * Only settable by CAP_SYS_RESOURCE. */
701 
702 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
703 					 * credential calculations
704 					 * (notably ptrace) */
705 };
706 
707 /*
708  * Bits in flags field of signal_struct.
709  */
710 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
711 #define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
712 #define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
713 #define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
714 /*
715  * Pending notifications to parent.
716  */
717 #define SIGNAL_CLD_STOPPED	0x00000010
718 #define SIGNAL_CLD_CONTINUED	0x00000020
719 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
720 
721 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
722 
723 /* If true, all threads except ->group_exit_task have pending SIGKILL */
724 static inline int signal_group_exit(const struct signal_struct *sig)
725 {
726 	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
727 		(sig->group_exit_task != NULL);
728 }
729 
730 /*
731  * Some day this will be a full-fledged user tracking system..
732  */
733 struct user_struct {
734 	atomic_t __count;	/* reference count */
735 	atomic_t processes;	/* How many processes does this user have? */
736 	atomic_t files;		/* How many open files does this user have? */
737 	atomic_t sigpending;	/* How many pending signals does this user have? */
738 #ifdef CONFIG_INOTIFY_USER
739 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
740 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
741 #endif
742 #ifdef CONFIG_FANOTIFY
743 	atomic_t fanotify_listeners;
744 #endif
745 #ifdef CONFIG_EPOLL
746 	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
747 #endif
748 #ifdef CONFIG_POSIX_MQUEUE
749 	/* protected by mq_lock	*/
750 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
751 #endif
752 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
753 
754 #ifdef CONFIG_KEYS
755 	struct key *uid_keyring;	/* UID specific keyring */
756 	struct key *session_keyring;	/* UID's default session keyring */
757 #endif
758 
759 	/* Hash table maintenance information */
760 	struct hlist_node uidhash_node;
761 	kuid_t uid;
762 
763 #ifdef CONFIG_PERF_EVENTS
764 	atomic_long_t locked_vm;
765 #endif
766 };
767 
768 extern int uids_sysfs_init(void);
769 
770 extern struct user_struct *find_user(kuid_t);
771 
772 extern struct user_struct root_user;
773 #define INIT_USER (&root_user)
774 
775 
776 struct backing_dev_info;
777 struct reclaim_state;
778 
779 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
780 struct sched_info {
781 	/* cumulative counters */
782 	unsigned long pcount;	      /* # of times run on this cpu */
783 	unsigned long long run_delay; /* time spent waiting on a runqueue */
784 
785 	/* timestamps */
786 	unsigned long long last_arrival,/* when we last ran on a cpu */
787 			   last_queued;	/* when we were last queued to run */
788 };
789 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
790 
791 #ifdef CONFIG_TASK_DELAY_ACCT
792 struct task_delay_info {
793 	spinlock_t	lock;
794 	unsigned int	flags;	/* Private per-task flags */
795 
796 	/* For each stat XXX, add following, aligned appropriately
797 	 *
798 	 * struct timespec XXX_start, XXX_end;
799 	 * u64 XXX_delay;
800 	 * u32 XXX_count;
801 	 *
802 	 * Atomicity of updates to XXX_delay, XXX_count protected by
803 	 * single lock above (split into XXX_lock if contention is an issue).
804 	 */
805 
806 	/*
807 	 * XXX_count is incremented on every XXX operation, the delay
808 	 * associated with the operation is added to XXX_delay.
809 	 * XXX_delay contains the accumulated delay time in nanoseconds.
810 	 */
811 	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
812 	u64 blkio_delay;	/* wait for sync block io completion */
813 	u64 swapin_delay;	/* wait for swapin block io completion */
814 	u32 blkio_count;	/* total count of the number of sync block */
815 				/* io operations performed */
816 	u32 swapin_count;	/* total count of the number of swapin block */
817 				/* io operations performed */
818 
819 	struct timespec freepages_start, freepages_end;
820 	u64 freepages_delay;	/* wait for memory reclaim */
821 	u32 freepages_count;	/* total count of memory reclaim */
822 };
823 #endif	/* CONFIG_TASK_DELAY_ACCT */
824 
825 static inline int sched_info_on(void)
826 {
827 #ifdef CONFIG_SCHEDSTATS
828 	return 1;
829 #elif defined(CONFIG_TASK_DELAY_ACCT)
830 	extern int delayacct_on;
831 	return delayacct_on;
832 #else
833 	return 0;
834 #endif
835 }
836 
837 enum cpu_idle_type {
838 	CPU_IDLE,
839 	CPU_NOT_IDLE,
840 	CPU_NEWLY_IDLE,
841 	CPU_MAX_IDLE_TYPES
842 };
843 
844 /*
845  * Increase resolution of cpu_power calculations
846  */
847 #define SCHED_POWER_SHIFT	10
848 #define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
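/*
 * In these units a CPU running at full nominal capacity contributes
 * SCHED_POWER_SCALE (1024) to its group's power; SMT siblings and
 * frequency/RT-pressure adjustments are expressed as fractions of that, so
 * capacities can be summed and compared using integer arithmetic only.
 */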
849 
850 /*
851  * sched-domains (multiprocessor balancing) declarations:
852  */
853 #ifdef CONFIG_SMP
854 #define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
855 #define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
856 #define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
857 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
858 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
859 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
860 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
861 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
862 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
863 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
864 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
865 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
866 #define SD_NUMA			0x4000	/* cross-node balancing */
867 
868 extern int __weak arch_sd_sibiling_asym_packing(void);
869 
870 struct sched_domain_attr {
871 	int relax_domain_level;
872 };
873 
874 #define SD_ATTR_INIT	(struct sched_domain_attr) {	\
875 	.relax_domain_level = -1,			\
876 }
877 
878 extern int sched_domain_level_max;
879 
880 struct sched_group;
881 
882 struct sched_domain {
883 	/* These fields must be setup */
884 	struct sched_domain *parent;	/* top domain must be null terminated */
885 	struct sched_domain *child;	/* bottom domain must be null terminated */
886 	struct sched_group *groups;	/* the balancing groups of the domain */
887 	unsigned long min_interval;	/* Minimum balance interval ms */
888 	unsigned long max_interval;	/* Maximum balance interval ms */
889 	unsigned int busy_factor;	/* less balancing by factor if busy */
890 	unsigned int imbalance_pct;	/* No balance until over watermark */
891 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
892 	unsigned int busy_idx;
893 	unsigned int idle_idx;
894 	unsigned int newidle_idx;
895 	unsigned int wake_idx;
896 	unsigned int forkexec_idx;
897 	unsigned int smt_gain;
898 
899 	int nohz_idle;			/* NOHZ IDLE status */
900 	int flags;			/* See SD_* */
901 	int level;
902 
903 	/* Runtime fields. */
904 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
905 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
906 	unsigned int nr_balance_failed; /* initialise to 0 */
907 
908 	/* idle_balance() stats */
909 	u64 max_newidle_lb_cost;
910 	unsigned long next_decay_max_lb_cost;
911 
912 #ifdef CONFIG_SCHEDSTATS
913 	/* load_balance() stats */
914 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
915 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
916 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
917 	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
918 	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
919 	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
920 	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
921 	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
922 
923 	/* Active load balancing */
924 	unsigned int alb_count;
925 	unsigned int alb_failed;
926 	unsigned int alb_pushed;
927 
928 	/* SD_BALANCE_EXEC stats */
929 	unsigned int sbe_count;
930 	unsigned int sbe_balanced;
931 	unsigned int sbe_pushed;
932 
933 	/* SD_BALANCE_FORK stats */
934 	unsigned int sbf_count;
935 	unsigned int sbf_balanced;
936 	unsigned int sbf_pushed;
937 
938 	/* try_to_wake_up() stats */
939 	unsigned int ttwu_wake_remote;
940 	unsigned int ttwu_move_affine;
941 	unsigned int ttwu_move_balance;
942 #endif
943 #ifdef CONFIG_SCHED_DEBUG
944 	char *name;
945 #endif
946 	union {
947 		void *private;		/* used during construction */
948 		struct rcu_head rcu;	/* used during destruction */
949 	};
950 
951 	unsigned int span_weight;
952 	/*
953 	 * Span of all CPUs in this domain.
954 	 *
955 	 * NOTE: this field is variable length. (Allocated dynamically
956 	 * by attaching extra space to the end of the structure,
957 	 * depending on how many CPUs the kernel has booted up with)
958 	 */
959 	unsigned long span[0];
960 };
961 
962 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
963 {
964 	return to_cpumask(sd->span);
965 }
966 
967 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
968 				    struct sched_domain_attr *dattr_new);
969 
970 /* Allocate an array of sched domains, for partition_sched_domains(). */
971 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
972 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
973 
974 bool cpus_share_cache(int this_cpu, int that_cpu);
975 
976 #else /* CONFIG_SMP */
977 
978 struct sched_domain_attr;
979 
980 static inline void
981 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
982 			struct sched_domain_attr *dattr_new)
983 {
984 }
985 
986 static inline bool cpus_share_cache(int this_cpu, int that_cpu)
987 {
988 	return true;
989 }
990 
991 #endif	/* !CONFIG_SMP */
992 
993 
994 struct io_context;			/* See blkdev.h */
995 
996 
997 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
998 extern void prefetch_stack(struct task_struct *t);
999 #else
1000 static inline void prefetch_stack(struct task_struct *t) { }
1001 #endif
1002 
1003 struct audit_context;		/* See audit.c */
1004 struct mempolicy;
1005 struct pipe_inode_info;
1006 struct uts_namespace;
1007 
1008 struct load_weight {
1009 	unsigned long weight;
1010 	u32 inv_weight;
1011 };
1012 
1013 struct sched_avg {
1014 	/*
1015 	 * These sums represent an infinite geometric series and so are bound
1016 	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
1017 	 * choices of y < 1-2^(-32)*1024.
1018 	 */
1019 	u32 runnable_avg_sum, runnable_avg_period;
1020 	u64 last_runnable_update;
1021 	s64 decay_count;
1022 	unsigned long load_avg_contrib;
1023 };
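/*
 * Worked bound (illustrative): the fair class decays these sums with a
 * factor y chosen so that y^32 = 1/2, so the geometric series converges to
 * roughly 1024/(1 - y) ~= 47700 (the LOAD_AVG_MAX constant in
 * kernel/sched/fair.c is 47742), well within a u32.
 */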
1024 
1025 #ifdef CONFIG_SCHEDSTATS
1026 struct sched_statistics {
1027 	u64			wait_start;
1028 	u64			wait_max;
1029 	u64			wait_count;
1030 	u64			wait_sum;
1031 	u64			iowait_count;
1032 	u64			iowait_sum;
1033 
1034 	u64			sleep_start;
1035 	u64			sleep_max;
1036 	s64			sum_sleep_runtime;
1037 
1038 	u64			block_start;
1039 	u64			block_max;
1040 	u64			exec_max;
1041 	u64			slice_max;
1042 
1043 	u64			nr_migrations_cold;
1044 	u64			nr_failed_migrations_affine;
1045 	u64			nr_failed_migrations_running;
1046 	u64			nr_failed_migrations_hot;
1047 	u64			nr_forced_migrations;
1048 
1049 	u64			nr_wakeups;
1050 	u64			nr_wakeups_sync;
1051 	u64			nr_wakeups_migrate;
1052 	u64			nr_wakeups_local;
1053 	u64			nr_wakeups_remote;
1054 	u64			nr_wakeups_affine;
1055 	u64			nr_wakeups_affine_attempts;
1056 	u64			nr_wakeups_passive;
1057 	u64			nr_wakeups_idle;
1058 };
1059 #endif
1060 
1061 struct sched_entity {
1062 	struct load_weight	load;		/* for load-balancing */
1063 	struct rb_node		run_node;
1064 	struct list_head	group_node;
1065 	unsigned int		on_rq;
1066 
1067 	u64			exec_start;
1068 	u64			sum_exec_runtime;
1069 	u64			vruntime;
1070 	u64			prev_sum_exec_runtime;
1071 
1072 	u64			nr_migrations;
1073 
1074 #ifdef CONFIG_SCHEDSTATS
1075 	struct sched_statistics statistics;
1076 #endif
1077 
1078 #ifdef CONFIG_FAIR_GROUP_SCHED
1079 	struct sched_entity	*parent;
1080 	/* rq on which this entity is (to be) queued: */
1081 	struct cfs_rq		*cfs_rq;
1082 	/* rq "owned" by this entity/group: */
1083 	struct cfs_rq		*my_q;
1084 #endif
1085 
1086 #ifdef CONFIG_SMP
1087 	/* Per-entity load-tracking */
1088 	struct sched_avg	avg;
1089 #endif
1090 };
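/*
 * Descriptive note: for a plain CFS task, run_node links the entity into
 * its cfs_rq's red-black tree, which is ordered by vruntime (weighted
 * virtual runtime); the leftmost entity is picked to run next. With
 * CONFIG_FAIR_GROUP_SCHED an entity can also represent a whole task group,
 * in which case my_q points at the group's own runqueue.
 */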
1091 
1092 struct sched_rt_entity {
1093 	struct list_head run_list;
1094 	unsigned long timeout;
1095 	unsigned long watchdog_stamp;
1096 	unsigned int time_slice;
1097 
1098 	struct sched_rt_entity *back;
1099 #ifdef CONFIG_RT_GROUP_SCHED
1100 	struct sched_rt_entity	*parent;
1101 	/* rq on which this entity is (to be) queued: */
1102 	struct rt_rq		*rt_rq;
1103 	/* rq "owned" by this entity/group: */
1104 	struct rt_rq		*my_q;
1105 #endif
1106 };
1107 
1108 struct sched_dl_entity {
1109 	struct rb_node	rb_node;
1110 
1111 	/*
1112 	 * Original scheduling parameters. Copied here from sched_attr
1113 	 * during sched_setattr(), they will remain the same until
1114 	 * the next sched_setattr().
1115 	 */
1116 	u64 dl_runtime;		/* maximum runtime for each instance	*/
1117 	u64 dl_deadline;	/* relative deadline of each instance	*/
1118 	u64 dl_period;		/* separation of two instances (period) */
1119 	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1120 
1121 	/*
1122 	 * Actual scheduling parameters. Initialized with the values above,
1123 	 * they are continuously updated during task execution. Note that
1124 	 * the remaining runtime could be < 0 in case we are in overrun.
1125 	 */
1126 	s64 runtime;		/* remaining runtime for this instance	*/
1127 	u64 deadline;		/* absolute deadline for this instance	*/
1128 	unsigned int flags;	/* specifying the scheduler behaviour	*/
1129 
1130 	/*
1131 	 * Some bool flags:
1132 	 *
1133 	 * @dl_throttled tells if we exhausted the runtime. If so, the
1134 	 * task has to wait for a replenishment to be performed at the
1135 	 * next firing of dl_timer.
1136 	 *
1137 	 * @dl_new tells if a new instance arrived. If so we must
1138 	 * start executing it with full runtime and reset its absolute
1139 	 * deadline;
1140 	 *
1141 	 * @dl_boosted tells if we are boosted due to DI (deadline
1142 	 * inheritance). If so we are outside the bandwidth enforcement
1143 	 * mechanism (but only until we exit the critical section).
1144 	 */
1145 	int dl_throttled, dl_new, dl_boosted;
1146 
1147 	/*
1148 	 * Bandwidth enforcement timer. Each -deadline task has its
1149 	 * own bandwidth to be enforced, thus we need one timer per task.
1150 	 */
1151 	struct hrtimer dl_timer;
1152 };
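/*
 * Worked example (illustrative): a task with dl_runtime = 10ms and
 * dl_deadline = 100ms may consume at most 10% of a CPU. dl_bw caches that
 * ratio in fixed point; assuming the 20-bit shift used by to_ratio(),
 * dl_bw = (10ms << 20) / 100ms ~= 104857, i.e. about 0.1 of (1 << 20).
 */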
1153 
1154 struct rcu_node;
1155 
1156 enum perf_event_task_context {
1157 	perf_invalid_context = -1,
1158 	perf_hw_context = 0,
1159 	perf_sw_context,
1160 	perf_nr_task_contexts,
1161 };
1162 
1163 struct task_struct {
1164 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1165 	void *stack;
1166 	atomic_t usage;
1167 	unsigned int flags;	/* per process flags, defined below */
1168 	unsigned int ptrace;
1169 
1170 #ifdef CONFIG_SMP
1171 	struct llist_node wake_entry;
1172 	int on_cpu;
1173 	struct task_struct *last_wakee;
1174 	unsigned long wakee_flips;
1175 	unsigned long wakee_flip_decay_ts;
1176 
1177 	int wake_cpu;
1178 #endif
1179 	int on_rq;
1180 
1181 	int prio, static_prio, normal_prio;
1182 	unsigned int rt_priority;
1183 	const struct sched_class *sched_class;
1184 	struct sched_entity se;
1185 	struct sched_rt_entity rt;
1186 #ifdef CONFIG_CGROUP_SCHED
1187 	struct task_group *sched_task_group;
1188 #endif
1189 	struct sched_dl_entity dl;
1190 
1191 #ifdef CONFIG_PREEMPT_NOTIFIERS
1192 	/* list of struct preempt_notifier: */
1193 	struct hlist_head preempt_notifiers;
1194 #endif
1195 
1196 #ifdef CONFIG_BLK_DEV_IO_TRACE
1197 	unsigned int btrace_seq;
1198 #endif
1199 
1200 	unsigned int policy;
1201 	int nr_cpus_allowed;
1202 	cpumask_t cpus_allowed;
1203 
1204 #ifdef CONFIG_PREEMPT_RCU
1205 	int rcu_read_lock_nesting;
1206 	char rcu_read_unlock_special;
1207 	struct list_head rcu_node_entry;
1208 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1209 #ifdef CONFIG_TREE_PREEMPT_RCU
1210 	struct rcu_node *rcu_blocked_node;
1211 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1212 #ifdef CONFIG_RCU_BOOST
1213 	struct rt_mutex *rcu_boost_mutex;
1214 #endif /* #ifdef CONFIG_RCU_BOOST */
1215 
1216 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1217 	struct sched_info sched_info;
1218 #endif
1219 
1220 	struct list_head tasks;
1221 #ifdef CONFIG_SMP
1222 	struct plist_node pushable_tasks;
1223 	struct rb_node pushable_dl_tasks;
1224 #endif
1225 
1226 	struct mm_struct *mm, *active_mm;
1227 #ifdef CONFIG_COMPAT_BRK
1228 	unsigned brk_randomized:1;
1229 #endif
1230 #if defined(SPLIT_RSS_COUNTING)
1231 	struct task_rss_stat	rss_stat;
1232 #endif
1233 /* task state */
1234 	int exit_state;
1235 	int exit_code, exit_signal;
1236 	int pdeath_signal;  /*  The signal sent when the parent dies  */
1237 	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
1238 
1239 	/* Used for emulating ABI behavior of previous Linux versions */
1240 	unsigned int personality;
1241 
1242 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1243 				 * execve */
1244 	unsigned in_iowait:1;
1245 
1246 	/* task may not gain privileges */
1247 	unsigned no_new_privs:1;
1248 
1249 	/* Revert to default priority/policy when forking */
1250 	unsigned sched_reset_on_fork:1;
1251 	unsigned sched_contributes_to_load:1;
1252 
1253 	pid_t pid;
1254 	pid_t tgid;
1255 
1256 #ifdef CONFIG_CC_STACKPROTECTOR
1257 	/* Canary value for the -fstack-protector gcc feature */
1258 	unsigned long stack_canary;
1259 #endif
1260 	/*
1261 	 * pointers to (original) parent process, youngest child, younger sibling,
1262 	 * older sibling, respectively.  (p->father can be replaced with
1263 	 * p->real_parent->pid)
1264 	 */
1265 	struct task_struct __rcu *real_parent; /* real parent process */
1266 	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1267 	/*
1268 	 * children/sibling forms the list of my natural children
1269 	 */
1270 	struct list_head children;	/* list of my children */
1271 	struct list_head sibling;	/* linkage in my parent's children list */
1272 	struct task_struct *group_leader;	/* threadgroup leader */
1273 
1274 	/*
1275 	 * ptraced is the list of tasks this task is using ptrace on.
1276 	 * This includes both natural children and PTRACE_ATTACH targets.
1277 	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1278 	 */
1279 	struct list_head ptraced;
1280 	struct list_head ptrace_entry;
1281 
1282 	/* PID/PID hash table linkage. */
1283 	struct pid_link pids[PIDTYPE_MAX];
1284 	struct list_head thread_group;
1285 	struct list_head thread_node;
1286 
1287 	struct completion *vfork_done;		/* for vfork() */
1288 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1289 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1290 
1291 	cputime_t utime, stime, utimescaled, stimescaled;
1292 	cputime_t gtime;
1293 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1294 	struct cputime prev_cputime;
1295 #endif
1296 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1297 	seqlock_t vtime_seqlock;
1298 	unsigned long long vtime_snap;
1299 	enum {
1300 		VTIME_SLEEPING = 0,
1301 		VTIME_USER,
1302 		VTIME_SYS,
1303 	} vtime_snap_whence;
1304 #endif
1305 	unsigned long nvcsw, nivcsw; /* context switch counts */
1306 	struct timespec start_time; 		/* monotonic time */
1307 	struct timespec real_start_time;	/* boot based time */
1308 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1309 	unsigned long min_flt, maj_flt;
1310 
1311 	struct task_cputime cputime_expires;
1312 	struct list_head cpu_timers[3];
1313 
1314 /* process credentials */
1315 	const struct cred __rcu *real_cred; /* objective and real subjective task
1316 					 * credentials (COW) */
1317 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1318 					 * credentials (COW) */
1319 	char comm[TASK_COMM_LEN]; /* executable name excluding path
1320 				     - access with [gs]et_task_comm (which lock
1321 				       it with task_lock())
1322 				     - initialized normally by setup_new_exec */
1323 /* file system info */
1324 	int link_count, total_link_count;
1325 #ifdef CONFIG_SYSVIPC
1326 /* ipc stuff */
1327 	struct sysv_sem sysvsem;
1328 #endif
1329 #ifdef CONFIG_DETECT_HUNG_TASK
1330 /* hung task detection */
1331 	unsigned long last_switch_count;
1332 #endif
1333 /* CPU-specific state of this task */
1334 	struct thread_struct thread;
1335 /* filesystem information */
1336 	struct fs_struct *fs;
1337 /* open file information */
1338 	struct files_struct *files;
1339 /* namespaces */
1340 	struct nsproxy *nsproxy;
1341 /* signal handlers */
1342 	struct signal_struct *signal;
1343 	struct sighand_struct *sighand;
1344 
1345 	sigset_t blocked, real_blocked;
1346 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1347 	struct sigpending pending;
1348 
1349 	unsigned long sas_ss_sp;
1350 	size_t sas_ss_size;
1351 	int (*notifier)(void *priv);
1352 	void *notifier_data;
1353 	sigset_t *notifier_mask;
1354 	struct callback_head *task_works;
1355 
1356 	struct audit_context *audit_context;
1357 #ifdef CONFIG_AUDITSYSCALL
1358 	kuid_t loginuid;
1359 	unsigned int sessionid;
1360 #endif
1361 	struct seccomp seccomp;
1362 
1363 /* Thread group tracking */
1364    	u32 parent_exec_id;
1365    	u32 self_exec_id;
1366 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1367  * mempolicy */
1368 	spinlock_t alloc_lock;
1369 
1370 	/* Protection of the PI data structures: */
1371 	raw_spinlock_t pi_lock;
1372 
1373 #ifdef CONFIG_RT_MUTEXES
1374 	/* PI waiters blocked on a rt_mutex held by this task */
1375 	struct rb_root pi_waiters;
1376 	struct rb_node *pi_waiters_leftmost;
1377 	/* Deadlock detection and priority inheritance handling */
1378 	struct rt_mutex_waiter *pi_blocked_on;
1379 	/* Top pi_waiters task */
1380 	struct task_struct *pi_top_task;
1381 #endif
1382 
1383 #ifdef CONFIG_DEBUG_MUTEXES
1384 	/* mutex deadlock detection */
1385 	struct mutex_waiter *blocked_on;
1386 #endif
1387 #ifdef CONFIG_TRACE_IRQFLAGS
1388 	unsigned int irq_events;
1389 	unsigned long hardirq_enable_ip;
1390 	unsigned long hardirq_disable_ip;
1391 	unsigned int hardirq_enable_event;
1392 	unsigned int hardirq_disable_event;
1393 	int hardirqs_enabled;
1394 	int hardirq_context;
1395 	unsigned long softirq_disable_ip;
1396 	unsigned long softirq_enable_ip;
1397 	unsigned int softirq_disable_event;
1398 	unsigned int softirq_enable_event;
1399 	int softirqs_enabled;
1400 	int softirq_context;
1401 #endif
1402 #ifdef CONFIG_LOCKDEP
1403 # define MAX_LOCK_DEPTH 48UL
1404 	u64 curr_chain_key;
1405 	int lockdep_depth;
1406 	unsigned int lockdep_recursion;
1407 	struct held_lock held_locks[MAX_LOCK_DEPTH];
1408 	gfp_t lockdep_reclaim_gfp;
1409 #endif
1410 
1411 /* journalling filesystem info */
1412 	void *journal_info;
1413 
1414 /* stacked block device info */
1415 	struct bio_list *bio_list;
1416 
1417 #ifdef CONFIG_BLOCK
1418 /* stack plugging */
1419 	struct blk_plug *plug;
1420 #endif
1421 
1422 /* VM state */
1423 	struct reclaim_state *reclaim_state;
1424 
1425 	struct backing_dev_info *backing_dev_info;
1426 
1427 	struct io_context *io_context;
1428 
1429 	unsigned long ptrace_message;
1430 	siginfo_t *last_siginfo; /* For ptrace use.  */
1431 	struct task_io_accounting ioac;
1432 #if defined(CONFIG_TASK_XACCT)
1433 	u64 acct_rss_mem1;	/* accumulated rss usage */
1434 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1435 	cputime_t acct_timexpd;	/* stime + utime since last update */
1436 #endif
1437 #ifdef CONFIG_CPUSETS
1438 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
1439 	seqcount_t mems_allowed_seq;	/* Sequence no to catch updates */
1440 	int cpuset_mem_spread_rotor;
1441 	int cpuset_slab_spread_rotor;
1442 #endif
1443 #ifdef CONFIG_CGROUPS
1444 	/* Control Group info protected by css_set_lock */
1445 	struct css_set __rcu *cgroups;
1446 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1447 	struct list_head cg_list;
1448 #endif
1449 #ifdef CONFIG_FUTEX
1450 	struct robust_list_head __user *robust_list;
1451 #ifdef CONFIG_COMPAT
1452 	struct compat_robust_list_head __user *compat_robust_list;
1453 #endif
1454 	struct list_head pi_state_list;
1455 	struct futex_pi_state *pi_state_cache;
1456 #endif
1457 #ifdef CONFIG_PERF_EVENTS
1458 	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1459 	struct mutex perf_event_mutex;
1460 	struct list_head perf_event_list;
1461 #endif
1462 #ifdef CONFIG_NUMA
1463 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1464 	short il_next;
1465 	short pref_node_fork;
1466 #endif
1467 #ifdef CONFIG_NUMA_BALANCING
1468 	int numa_scan_seq;
1469 	unsigned int numa_scan_period;
1470 	unsigned int numa_scan_period_max;
1471 	int numa_preferred_nid;
1472 	int numa_migrate_deferred;
1473 	unsigned long numa_migrate_retry;
1474 	u64 node_stamp;			/* migration stamp  */
1475 	struct callback_head numa_work;
1476 
1477 	struct list_head numa_entry;
1478 	struct numa_group *numa_group;
1479 
1480 	/*
1481 	 * Exponential decaying average of faults on a per-node basis.
1482 	 * Scheduling placement decisions are made based on these counts.
1483 	 * The values remain static for the duration of a PTE scan
1484 	 */
1485 	unsigned long *numa_faults;
1486 	unsigned long total_numa_faults;
1487 
1488 	/*
1489 	 * numa_faults_buffer records faults per node during the current
1490 	 * scan window. When the scan completes, the counts in numa_faults
1491 	 * decay and these values are copied.
1492 	 */
1493 	unsigned long *numa_faults_buffer;
1494 
1495 	/*
1496 	 * numa_faults_locality tracks if faults recorded during the last
1497 	 * scan window were remote/local. The task scan period is adapted
1498 	 * based on the locality of the faults with different weights
1499 	 * depending on whether they were shared or private faults
1500 	 */
1501 	unsigned long numa_faults_locality[2];
1502 
1503 	unsigned long numa_pages_migrated;
1504 #endif /* CONFIG_NUMA_BALANCING */
1505 
1506 	struct rcu_head rcu;
1507 
1508 	/*
1509 	 * cache last used pipe for splice
1510 	 */
1511 	struct pipe_inode_info *splice_pipe;
1512 
1513 	struct page_frag task_frag;
1514 
1515 #ifdef	CONFIG_TASK_DELAY_ACCT
1516 	struct task_delay_info *delays;
1517 #endif
1518 #ifdef CONFIG_FAULT_INJECTION
1519 	int make_it_fail;
1520 #endif
1521 	/*
1522 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1523 	 * balance_dirty_pages() for some dirty throttling pause
1524 	 */
1525 	int nr_dirtied;
1526 	int nr_dirtied_pause;
1527 	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1528 
1529 #ifdef CONFIG_LATENCYTOP
1530 	int latency_record_count;
1531 	struct latency_record latency_record[LT_SAVECOUNT];
1532 #endif
1533 	/*
1534 	 * time slack values; these are used to round up poll() and
1535 	 * select() etc timeout values. These are in nanoseconds.
1536 	 */
1537 	unsigned long timer_slack_ns;
1538 	unsigned long default_timer_slack_ns;
1539 
1540 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1541 	/* Index of current stored address in ret_stack */
1542 	int curr_ret_stack;
1543 	/* Stack of return addresses for return function tracing */
1544 	struct ftrace_ret_stack	*ret_stack;
1545 	/* time stamp for last schedule */
1546 	unsigned long long ftrace_timestamp;
1547 	/*
1548 	 * Number of functions that haven't been traced
1549 	 * because of depth overrun.
1550 	 */
1551 	atomic_t trace_overrun;
1552 	/* Pause for the tracing */
1553 	atomic_t tracing_graph_pause;
1554 #endif
1555 #ifdef CONFIG_TRACING
1556 	/* state flags for use by tracers */
1557 	unsigned long trace;
1558 	/* bitmask and counter of trace recursion */
1559 	unsigned long trace_recursion;
1560 #endif /* CONFIG_TRACING */
1561 #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
1562 	struct memcg_batch_info {
1563 		int do_batch;	/* incremented when batch uncharge started */
1564 		struct mem_cgroup *memcg; /* target memcg of uncharge */
1565 		unsigned long nr_pages;	/* uncharged usage */
1566 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1567 	} memcg_batch;
1568 	unsigned int memcg_kmem_skip_account;
1569 	struct memcg_oom_info {
1570 		struct mem_cgroup *memcg;
1571 		gfp_t gfp_mask;
1572 		int order;
1573 		unsigned int may_oom:1;
1574 	} memcg_oom;
1575 #endif
1576 #ifdef CONFIG_UPROBES
1577 	struct uprobe_task *utask;
1578 #endif
1579 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1580 	unsigned int	sequential_io;
1581 	unsigned int	sequential_io_avg;
1582 #endif
1583 };
1584 
1585 /* Future-safe accessor for struct task_struct's cpus_allowed. */
1586 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1587 
1588 #define TNF_MIGRATED	0x01
1589 #define TNF_NO_GROUP	0x02
1590 #define TNF_SHARED	0x04
1591 #define TNF_FAULT_LOCAL	0x08
1592 
1593 #ifdef CONFIG_NUMA_BALANCING
1594 extern void task_numa_fault(int last_node, int node, int pages, int flags);
1595 extern pid_t task_numa_group_id(struct task_struct *p);
1596 extern void set_numabalancing_state(bool enabled);
1597 extern void task_numa_free(struct task_struct *p);
1598 
1599 extern unsigned int sysctl_numa_balancing_migrate_deferred;
1600 #else
1601 static inline void task_numa_fault(int last_node, int node, int pages,
1602 				   int flags)
1603 {
1604 }
1605 static inline pid_t task_numa_group_id(struct task_struct *p)
1606 {
1607 	return 0;
1608 }
1609 static inline void set_numabalancing_state(bool enabled)
1610 {
1611 }
1612 static inline void task_numa_free(struct task_struct *p)
1613 {
1614 }
1615 #endif
1616 
1617 static inline struct pid *task_pid(struct task_struct *task)
1618 {
1619 	return task->pids[PIDTYPE_PID].pid;
1620 }
1621 
1622 static inline struct pid *task_tgid(struct task_struct *task)
1623 {
1624 	return task->group_leader->pids[PIDTYPE_PID].pid;
1625 }
1626 
1627 /*
1628  * Without tasklist or rcu lock it is not safe to dereference
1629  * the result of task_pgrp/task_session even if task == current,
1630  * we can race with another thread doing sys_setsid/sys_setpgid.
1631  */
1632 static inline struct pid *task_pgrp(struct task_struct *task)
1633 {
1634 	return task->group_leader->pids[PIDTYPE_PGID].pid;
1635 }
1636 
1637 static inline struct pid *task_session(struct task_struct *task)
1638 {
1639 	return task->group_leader->pids[PIDTYPE_SID].pid;
1640 }
1641 
1642 struct pid_namespace;
1643 
1644 /*
1645  * the helpers to get the task's different pids as they are seen
1646  * from various namespaces
1647  *
1648  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1649  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1650  *                     current.
1651  * task_xid_nr_ns()  : id seen from the ns specified;
1652  *
1653  * set_task_vxid()   : assigns a virtual id to a task;
1654  *
1655  * see also pid_nr() etc in include/linux/pid.h
1656  */
1657 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1658 			struct pid_namespace *ns);
1659 
1660 static inline pid_t task_pid_nr(struct task_struct *tsk)
1661 {
1662 	return tsk->pid;
1663 }
1664 
1665 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1666 					struct pid_namespace *ns)
1667 {
1668 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1669 }
1670 
1671 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1672 {
1673 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1674 }
1675 
1676 
1677 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1678 {
1679 	return tsk->tgid;
1680 }
1681 
1682 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1683 
1684 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1685 {
1686 	return pid_vnr(task_tgid(tsk));
1687 }
1688 
1689 
1690 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1691 					struct pid_namespace *ns)
1692 {
1693 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1694 }
1695 
1696 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1697 {
1698 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1699 }
1700 
1701 
1702 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1703 					struct pid_namespace *ns)
1704 {
1705 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1706 }
1707 
1708 static inline pid_t task_session_vnr(struct task_struct *tsk)
1709 {
1710 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1711 }
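
/*
 * Illustrative sketch (not part of the kernel API): reporting a task's pid
 * as seen globally, from a specific namespace "ns", and from the pid
 * namespace of current, using the helpers above:
 *
 *	rcu_read_lock();
 *	pr_info("pid %d, pid in ns %d, vpid %d\n",
 *		task_pid_nr(tsk), task_pid_nr_ns(tsk, ns), task_pid_vnr(tsk));
 *	rcu_read_unlock();
 *
 * "tsk" and "ns" are assumed to be valid and properly referenced.
 */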
1712 
1713 /* obsolete, do not use */
1714 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1715 {
1716 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1717 }
1718 
1719 /**
1720  * pid_alive - check that a task structure is not stale
1721  * @p: Task structure to be checked.
1722  *
1723  * Test if a process is not yet dead (at most zombie state).
1724  * If pid_alive fails, then pointers within the task structure
1725  * can be stale and must not be dereferenced.
1726  *
1727  * Return: 1 if the process is alive. 0 otherwise.
1728  */
1729 static inline int pid_alive(struct task_struct *p)
1730 {
1731 	return p->pids[PIDTYPE_PID].pid != NULL;
1732 }
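
/*
 * Illustrative sketch: check pid_alive() under rcu_read_lock() (or
 * tasklist_lock) before following pid-related pointers of a task that may
 * already be released, mirroring the usual procfs pattern:
 *
 *	rcu_read_lock();
 *	if (pid_alive(p))
 *		ppid = task_tgid_nr_ns(rcu_dereference(p->real_parent), ns);
 *	rcu_read_unlock();
 *
 * "p", "ppid" and "ns" are caller-provided in this sketch.
 */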
1733 
1734 /**
1735  * is_global_init - check if a task structure is init
1736  * @tsk: Task structure to be checked.
1737  *
1738  * Check if a task structure is the first user space task the kernel created.
1739  *
1740  * Return: 1 if the task structure is init. 0 otherwise.
1741  */
1742 static inline int is_global_init(struct task_struct *tsk)
1743 {
1744 	return tsk->pid == 1;
1745 }
1746 
1747 extern struct pid *cad_pid;
1748 
1749 extern void free_task(struct task_struct *tsk);
1750 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1751 
1752 extern void __put_task_struct(struct task_struct *t);
1753 
1754 static inline void put_task_struct(struct task_struct *t)
1755 {
1756 	if (atomic_dec_and_test(&t->usage))
1757 		__put_task_struct(t);
1758 }
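
/*
 * Illustrative sketch: a caller that wants to keep using a task_struct
 * after dropping the lock or RCU section that made it visible takes a
 * reference first; the final put frees it via __put_task_struct():
 *
 *	get_task_struct(tsk);
 *	...use tsk...
 *	put_task_struct(tsk);
 */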
1759 
1760 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1761 extern void task_cputime(struct task_struct *t,
1762 			 cputime_t *utime, cputime_t *stime);
1763 extern void task_cputime_scaled(struct task_struct *t,
1764 				cputime_t *utimescaled, cputime_t *stimescaled);
1765 extern cputime_t task_gtime(struct task_struct *t);
1766 #else
1767 static inline void task_cputime(struct task_struct *t,
1768 				cputime_t *utime, cputime_t *stime)
1769 {
1770 	if (utime)
1771 		*utime = t->utime;
1772 	if (stime)
1773 		*stime = t->stime;
1774 }
1775 
1776 static inline void task_cputime_scaled(struct task_struct *t,
1777 				       cputime_t *utimescaled,
1778 				       cputime_t *stimescaled)
1779 {
1780 	if (utimescaled)
1781 		*utimescaled = t->utimescaled;
1782 	if (stimescaled)
1783 		*stimescaled = t->stimescaled;
1784 }
1785 
1786 static inline cputime_t task_gtime(struct task_struct *t)
1787 {
1788 	return t->gtime;
1789 }
1790 #endif
1791 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1792 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
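
/*
 * Illustrative sketch: sampling a task's accumulated user and system time.
 * The *_adjusted() variant above is the one intended for reporting to
 * user space:
 *
 *	cputime_t utime, stime;
 *
 *	task_cputime(tsk, &utime, &stime);		...raw accounting...
 *	task_cputime_adjusted(tsk, &utime, &stime);	...for reporting...
 */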
1793 
1794 /*
1795  * Per process flags
1796  */
1797 #define PF_EXITING	0x00000004	/* getting shut down */
1798 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1799 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1800 #define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
1801 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1802 #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1803 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1804 #define PF_DUMPCORE	0x00000200	/* dumped core */
1805 #define PF_SIGNALED	0x00000400	/* killed by a signal */
1806 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
1807 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
1808 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1809 #define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
1810 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1811 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
1812 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1813 #define PF_KSWAPD	0x00040000	/* I am kswapd */
1814 #define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
1815 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1816 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1817 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1818 #define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1819 #define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1820 #define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1821 #define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
1822 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1823 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
1824 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1825 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
1826 #define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
1827 
1828 /*
1829  * Only the _current_ task can read/write to tsk->flags, but other
1830  * tasks can access tsk->flags in readonly mode, for example
1831  * with tsk_used_math() (like during threaded core dumping).
1832  * There is however an exception to this rule during ptrace
1833  * or during fork: the ptracer task is allowed to write to the
1834  * child->flags of its traced child (same goes for fork, the parent
1835  * can write to the child->flags), because we're guaranteed the
1836  * child is not running and in turn not changing child->flags
1837  * at the same time the parent does it.
1838  */
1839 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1840 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1841 #define clear_used_math() clear_stopped_child_used_math(current)
1842 #define set_used_math() set_stopped_child_used_math(current)
1843 #define conditional_stopped_child_used_math(condition, child) \
1844 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1845 #define conditional_used_math(condition) \
1846 	conditional_stopped_child_used_math(condition, current)
1847 #define copy_to_stopped_child_used_math(child) \
1848 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1849 /* NOTE: this will return 0 or PF_USED_MATH; it will never return 1 */
1850 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1851 #define used_math() tsk_used_math(current)
1852 
1853 /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
1854 static inline gfp_t memalloc_noio_flags(gfp_t flags)
1855 {
1856 	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1857 		flags &= ~__GFP_IO;
1858 	return flags;
1859 }
1860 
1861 static inline unsigned int memalloc_noio_save(void)
1862 {
1863 	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
1864 	current->flags |= PF_MEMALLOC_NOIO;
1865 	return flags;
1866 }
1867 
1868 static inline void memalloc_noio_restore(unsigned int flags)
1869 {
1870 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
1871 }
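
/*
 * Illustrative sketch: code that must not recurse into the I/O path (e.g.
 * a block device being resumed) brackets its allocations so that the
 * allocator strips __GFP_IO via memalloc_noio_flags():
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	...memory allocations that must not issue I/O...
 *
 *	memalloc_noio_restore(noio_flags);
 */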
1872 
1873 /*
1874  * task->jobctl flags
1875  */
1876 #define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1877 
1878 #define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1879 #define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1880 #define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
1881 #define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1882 #define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1883 #define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1884 #define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1885 
1886 #define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1887 #define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1888 #define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
1889 #define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1890 #define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1891 #define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1892 #define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1893 
1894 #define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1895 #define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
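
/*
 * Illustrative sketch: the low 16 bits of ->jobctl hold the signal number
 * of the pending group stop, the higher bits hold the state flags above
 * (accesses assume siglock is held):
 *
 *	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
 *	bool stop_pending = current->jobctl & JOBCTL_STOP_PENDING;
 */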
1896 
1897 extern bool task_set_jobctl_pending(struct task_struct *task,
1898 				    unsigned int mask);
1899 extern void task_clear_jobctl_trapping(struct task_struct *task);
1900 extern void task_clear_jobctl_pending(struct task_struct *task,
1901 				      unsigned int mask);
1902 
1903 #ifdef CONFIG_PREEMPT_RCU
1904 
1905 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1906 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1907 
1908 static inline void rcu_copy_process(struct task_struct *p)
1909 {
1910 	p->rcu_read_lock_nesting = 0;
1911 	p->rcu_read_unlock_special = 0;
1912 #ifdef CONFIG_TREE_PREEMPT_RCU
1913 	p->rcu_blocked_node = NULL;
1914 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1915 #ifdef CONFIG_RCU_BOOST
1916 	p->rcu_boost_mutex = NULL;
1917 #endif /* #ifdef CONFIG_RCU_BOOST */
1918 	INIT_LIST_HEAD(&p->rcu_node_entry);
1919 }
1920 
1921 #else
1922 
1923 static inline void rcu_copy_process(struct task_struct *p)
1924 {
1925 }
1926 
1927 #endif
1928 
1929 static inline void tsk_restore_flags(struct task_struct *task,
1930 				unsigned long orig_flags, unsigned long flags)
1931 {
1932 	task->flags &= ~flags;
1933 	task->flags |= orig_flags & flags;
1934 }
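
/*
 * Illustrative sketch: tsk_restore_flags() restores only the bits named in
 * the last argument, e.g. when temporarily granting PF_MEMALLOC:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC;
 *	...allocate...
 *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 */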
1935 
1936 #ifdef CONFIG_SMP
1937 extern void do_set_cpus_allowed(struct task_struct *p,
1938 			       const struct cpumask *new_mask);
1939 
1940 extern int set_cpus_allowed_ptr(struct task_struct *p,
1941 				const struct cpumask *new_mask);
1942 #else
1943 static inline void do_set_cpus_allowed(struct task_struct *p,
1944 				      const struct cpumask *new_mask)
1945 {
1946 }
1947 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1948 				       const struct cpumask *new_mask)
1949 {
1950 	if (!cpumask_test_cpu(0, new_mask))
1951 		return -EINVAL;
1952 	return 0;
1953 }
1954 #endif
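
/*
 * Illustrative sketch: restricting a task to a single CPU with the helper
 * above ("cpu" is assumed to be a valid, online CPU):
 *
 *	int ret = set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *	if (ret)
 *		pr_warn("could not restrict task to CPU %d\n", cpu);
 */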
1955 
1956 #ifdef CONFIG_NO_HZ_COMMON
1957 void calc_load_enter_idle(void);
1958 void calc_load_exit_idle(void);
1959 #else
1960 static inline void calc_load_enter_idle(void) { }
1961 static inline void calc_load_exit_idle(void) { }
1962 #endif /* CONFIG_NO_HZ_COMMON */
1963 
1964 #ifndef CONFIG_CPUMASK_OFFSTACK
1965 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1966 {
1967 	return set_cpus_allowed_ptr(p, &new_mask);
1968 }
1969 #endif
1970 
1971 /*
1972  * Do not use outside of architecture code which knows its limitations.
1973  *
1974  * sched_clock() has no promise of monotonicity or bounded drift between
1975  * CPUs; using it directly (which you should not) requires disabling IRQs.
1976  *
1977  * Please use one of the three interfaces below.
1978  */
1979 extern unsigned long long notrace sched_clock(void);
1980 /*
1981  * See the comment in kernel/sched/clock.c
1982  */
1983 extern u64 cpu_clock(int cpu);
1984 extern u64 local_clock(void);
1985 extern u64 sched_clock_cpu(int cpu);
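
/*
 * Illustrative sketch: timing a short section with local_clock(), which
 * (unlike raw sched_clock()) may be called with IRQs enabled:
 *
 *	u64 t0 = local_clock();
 *	...work...
 *	pr_debug("took %llu ns\n", local_clock() - t0);
 */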
1986 
1987 
1988 extern void sched_clock_init(void);
1989 
1990 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1991 static inline void sched_clock_tick(void)
1992 {
1993 }
1994 
1995 static inline void sched_clock_idle_sleep_event(void)
1996 {
1997 }
1998 
1999 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2000 {
2001 }
2002 #else
2003 /*
2004  * Architectures that have specified CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2005  * in their arch Kconfig may still discover during bootup that
2006  * sched_clock() is reliable after all; they can then flag this via
2007  * set_sched_clock_stable():
2008  */
2009 extern int sched_clock_stable(void);
2010 extern void set_sched_clock_stable(void);
2011 extern void clear_sched_clock_stable(void);
2012 
2013 extern void sched_clock_tick(void);
2014 extern void sched_clock_idle_sleep_event(void);
2015 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2016 #endif
2017 
2018 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2019 /*
2020  * An interface for runtime opt-in to IRQ time accounting, based on
2021  * sched_clock. The opt-in is explicit so that architectures with slow
2022  * sched_clock() implementations do not pay the accounting overhead.
2023  */
2024 extern void enable_sched_clock_irqtime(void);
2025 extern void disable_sched_clock_irqtime(void);
2026 #else
2027 static inline void enable_sched_clock_irqtime(void) {}
2028 static inline void disable_sched_clock_irqtime(void) {}
2029 #endif
2030 
2031 extern unsigned long long
2032 task_sched_runtime(struct task_struct *task);
2033 
2034 /* sched_exec is called by processes performing an exec */
2035 #ifdef CONFIG_SMP
2036 extern void sched_exec(void);
2037 #else
2038 #define sched_exec()   {}
2039 #endif
2040 
2041 extern void sched_clock_idle_sleep_event(void);
2042 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2043 
2044 #ifdef CONFIG_HOTPLUG_CPU
2045 extern void idle_task_exit(void);
2046 #else
2047 static inline void idle_task_exit(void) {}
2048 #endif
2049 
2050 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2051 extern void wake_up_nohz_cpu(int cpu);
2052 #else
2053 static inline void wake_up_nohz_cpu(int cpu) { }
2054 #endif
2055 
2056 #ifdef CONFIG_NO_HZ_FULL
2057 extern bool sched_can_stop_tick(void);
2058 extern u64 scheduler_tick_max_deferment(void);
2059 #else
2060 static inline bool sched_can_stop_tick(void) { return false; }
2061 #endif
2062 
2063 #ifdef CONFIG_SCHED_AUTOGROUP
2064 extern void sched_autogroup_create_attach(struct task_struct *p);
2065 extern void sched_autogroup_detach(struct task_struct *p);
2066 extern void sched_autogroup_fork(struct signal_struct *sig);
2067 extern void sched_autogroup_exit(struct signal_struct *sig);
2068 #ifdef CONFIG_PROC_FS
2069 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2070 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2071 #endif
2072 #else
2073 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2074 static inline void sched_autogroup_detach(struct task_struct *p) { }
2075 static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2076 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2077 #endif
2078 
2079 extern bool yield_to(struct task_struct *p, bool preempt);
2080 extern void set_user_nice(struct task_struct *p, long nice);
2081 extern int task_prio(const struct task_struct *p);
2082 extern int task_nice(const struct task_struct *p);
2083 extern int can_nice(const struct task_struct *p, const int nice);
2084 extern int task_curr(const struct task_struct *p);
2085 extern int idle_cpu(int cpu);
2086 extern int sched_setscheduler(struct task_struct *, int,
2087 			      const struct sched_param *);
2088 extern int sched_setscheduler_nocheck(struct task_struct *, int,
2089 				      const struct sched_param *);
2090 extern int sched_setattr(struct task_struct *,
2091 			 const struct sched_attr *);
2092 extern struct task_struct *idle_task(int cpu);
2093 /**
2094  * is_idle_task - is the specified task an idle task?
2095  * @p: the task in question.
2096  *
2097  * Return: 1 if @p is an idle task. 0 otherwise.
2098  */
2099 static inline bool is_idle_task(const struct task_struct *p)
2100 {
2101 	return p->pid == 0;
2102 }
2103 extern struct task_struct *curr_task(int cpu);
2104 extern void set_curr_task(int cpu, struct task_struct *p);
2105 
2106 void yield(void);
2107 
2108 /*
2109  * The default (Linux) execution domain.
2110  */
2111 extern struct exec_domain	default_exec_domain;
2112 
2113 union thread_union {
2114 	struct thread_info thread_info;
2115 	unsigned long stack[THREAD_SIZE/sizeof(long)];
2116 };
2117 
2118 #ifndef __HAVE_ARCH_KSTACK_END
2119 static inline int kstack_end(void *addr)
2120 {
2121 	/* Reliable end of stack detection:
2122 	 * Some APM BIOS versions misalign the stack.
2123 	 */
2124 	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2125 }
2126 #endif
2127 
2128 extern union thread_union init_thread_union;
2129 extern struct task_struct init_task;
2130 
2131 extern struct   mm_struct init_mm;
2132 
2133 extern struct pid_namespace init_pid_ns;
2134 
2135 /*
2136  * find a task by one of its numerical ids
2137  *
2138  * find_task_by_pid_ns():
2139  *      finds a task by its pid in the specified namespace
2140  * find_task_by_vpid():
2141  *      finds a task by its virtual pid
2142  *
2143  * see also find_vpid() etc in include/linux/pid.h
2144  */
2145 
2146 extern struct task_struct *find_task_by_vpid(pid_t nr);
2147 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2148 		struct pid_namespace *ns);
2149 
2150 /* per-UID process charging. */
2151 extern struct user_struct * alloc_uid(kuid_t);
2152 static inline struct user_struct *get_uid(struct user_struct *u)
2153 {
2154 	atomic_inc(&u->__count);
2155 	return u;
2156 }
2157 extern void free_uid(struct user_struct *);
2158 
2159 #include <asm/current.h>
2160 
2161 extern void xtime_update(unsigned long ticks);
2162 
2163 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2164 extern int wake_up_process(struct task_struct *tsk);
2165 extern void wake_up_new_task(struct task_struct *tsk);
2166 #ifdef CONFIG_SMP
2167  extern void kick_process(struct task_struct *tsk);
2168 #else
2169  static inline void kick_process(struct task_struct *tsk) { }
2170 #endif
2171 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2172 extern void sched_dead(struct task_struct *p);
2173 
2174 extern void proc_caches_init(void);
2175 extern void flush_signals(struct task_struct *);
2176 extern void __flush_signals(struct task_struct *);
2177 extern void ignore_signals(struct task_struct *);
2178 extern void flush_signal_handlers(struct task_struct *, int force_default);
2179 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2180 
2181 static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2182 {
2183 	unsigned long flags;
2184 	int ret;
2185 
2186 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2187 	ret = dequeue_signal(tsk, mask, info);
2188 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2189 
2190 	return ret;
2191 }
2192 
2193 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2194 			      sigset_t *mask);
2195 extern void unblock_all_signals(void);
2196 extern void release_task(struct task_struct * p);
2197 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2198 extern int force_sigsegv(int, struct task_struct *);
2199 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2200 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2201 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2202 extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2203 				const struct cred *, u32);
2204 extern int kill_pgrp(struct pid *pid, int sig, int priv);
2205 extern int kill_pid(struct pid *pid, int sig, int priv);
2206 extern int kill_proc_info(int, struct siginfo *, pid_t);
2207 extern __must_check bool do_notify_parent(struct task_struct *, int);
2208 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2209 extern void force_sig(int, struct task_struct *);
2210 extern int send_sig(int, struct task_struct *, int);
2211 extern int zap_other_threads(struct task_struct *p);
2212 extern struct sigqueue *sigqueue_alloc(void);
2213 extern void sigqueue_free(struct sigqueue *);
2214 extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2215 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2216 
2217 static inline void restore_saved_sigmask(void)
2218 {
2219 	if (test_and_clear_restore_sigmask())
2220 		__set_current_blocked(&current->saved_sigmask);
2221 }
2222 
2223 static inline sigset_t *sigmask_to_save(void)
2224 {
2225 	sigset_t *res = &current->blocked;
2226 	if (unlikely(test_restore_sigmask()))
2227 		res = &current->saved_sigmask;
2228 	return res;
2229 }
2230 
2231 static inline int kill_cad_pid(int sig, int priv)
2232 {
2233 	return kill_pid(cad_pid, sig, priv);
2234 }
2235 
2236 /* These can be the second arg to send_sig_info/send_group_sig_info.  */
2237 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2238 #define SEND_SIG_PRIV	((struct siginfo *) 1)
2239 #define SEND_SIG_FORCED	((struct siginfo *) 2)
2240 
2241 /*
2242  * True if we are on the alternate signal stack.
2243  */
2244 static inline int on_sig_stack(unsigned long sp)
2245 {
2246 #ifdef CONFIG_STACK_GROWSUP
2247 	return sp >= current->sas_ss_sp &&
2248 		sp - current->sas_ss_sp < current->sas_ss_size;
2249 #else
2250 	return sp > current->sas_ss_sp &&
2251 		sp - current->sas_ss_sp <= current->sas_ss_size;
2252 #endif
2253 }
2254 
2255 static inline int sas_ss_flags(unsigned long sp)
2256 {
2257 	return (current->sas_ss_size == 0 ? SS_DISABLE
2258 		: on_sig_stack(sp) ? SS_ONSTACK : 0);
2259 }
2260 
2261 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2262 {
2263 	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2264 #ifdef CONFIG_STACK_GROWSUP
2265 		return current->sas_ss_sp;
2266 #else
2267 		return current->sas_ss_sp + current->sas_ss_size;
2268 #endif
2269 	return sp;
2270 }
2271 
2272 /*
2273  * Routines for handling mm_structs
2274  */
2275 extern struct mm_struct * mm_alloc(void);
2276 
2277 /* mmdrop drops the mm and the page tables */
2278 extern void __mmdrop(struct mm_struct *);
2279 static inline void mmdrop(struct mm_struct * mm)
2280 {
2281 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2282 		__mmdrop(mm);
2283 }
2284 
2285 /* mmput gets rid of the mappings and all user-space */
2286 extern void mmput(struct mm_struct *);
2287 /* Grab a reference to a task's mm, if it is not already going away */
2288 extern struct mm_struct *get_task_mm(struct task_struct *task);
2289 /*
2290  * Grab a reference to a task's mm, if it is not already going away
2291  * and ptrace_may_access with the mode parameter passed to it
2292  * succeeds.
2293  */
2294 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2295 /* Remove the current task's stale references to the old mm_struct */
2296 extern void mm_release(struct task_struct *, struct mm_struct *);
2297 
2298 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2299 			struct task_struct *);
2300 extern void flush_thread(void);
2301 extern void exit_thread(void);
2302 
2303 extern void exit_files(struct task_struct *);
2304 extern void __cleanup_sighand(struct sighand_struct *);
2305 
2306 extern void exit_itimers(struct signal_struct *);
2307 extern void flush_itimer_signals(void);
2308 
2309 extern void do_group_exit(int);
2310 
2311 extern int allow_signal(int);
2312 extern int disallow_signal(int);
2313 
2314 extern int do_execve(const char *,
2315 		     const char __user * const __user *,
2316 		     const char __user * const __user *);
2317 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2318 struct task_struct *fork_idle(int);
2319 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2320 
2321 extern void set_task_comm(struct task_struct *tsk, char *from);
2322 extern char *get_task_comm(char *to, struct task_struct *tsk);
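
/*
 * Illustrative sketch: get_task_comm() takes the task lock and copies the
 * name into a caller-supplied buffer of at least TASK_COMM_LEN bytes:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_info("comm: %s\n", comm);
 */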
2323 
2324 #ifdef CONFIG_SMP
2325 void scheduler_ipi(void);
2326 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2327 #else
2328 static inline void scheduler_ipi(void) { }
2329 static inline unsigned long wait_task_inactive(struct task_struct *p,
2330 					       long match_state)
2331 {
2332 	return 1;
2333 }
2334 #endif
2335 
2336 #define next_task(p) \
2337 	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2338 
2339 #define for_each_process(p) \
2340 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2341 
2342 extern bool current_is_single_threaded(void);
2343 
2344 /*
2345  * Careful: do_each_thread/while_each_thread is a double loop so
2346  *          'break' will not work as expected - use goto instead.
2347  */
2348 #define do_each_thread(g, t) \
2349 	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2350 
2351 #define while_each_thread(g, t) \
2352 	while ((t = next_thread(t)) != g)
2353 
2354 #define __for_each_thread(signal, t)	\
2355 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2356 
2357 #define for_each_thread(p, t)		\
2358 	__for_each_thread((p)->signal, t)
2359 
2360 /* Careful: this is a double loop, 'break' won't work as expected. */
2361 #define for_each_process_thread(p, t)	\
2362 	for_each_process(p) for_each_thread(p, t)
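
/*
 * Illustrative sketch: counting every thread in the system with the
 * iterator above. The walk must be covered by rcu_read_lock() or
 * tasklist_lock, and leaving the do_each_thread()/while_each_thread()
 * double loop early requires goto, not break:
 *
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		nr++;
 *	rcu_read_unlock();
 */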
2363 
2364 static inline int get_nr_threads(struct task_struct *tsk)
2365 {
2366 	return tsk->signal->nr_threads;
2367 }
2368 
2369 static inline bool thread_group_leader(struct task_struct *p)
2370 {
2371 	return p->exit_signal >= 0;
2372 }
2373 
2374 /* Due to the insanities of de_thread() it is possible for a process
2375  * to have the pid of the thread group leader without actually being
2376  * the thread group leader.  For iteration through the pids in proc
2377  * all we care about is that we have a task with the appropriate
2378  * pid; we don't actually care if we have the right task.
2379  */
2380 static inline bool has_group_leader_pid(struct task_struct *p)
2381 {
2382 	return task_pid(p) == p->signal->leader_pid;
2383 }
2384 
2385 static inline
2386 bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2387 {
2388 	return p1->signal == p2->signal;
2389 }
2390 
2391 static inline struct task_struct *next_thread(const struct task_struct *p)
2392 {
2393 	return list_entry_rcu(p->thread_group.next,
2394 			      struct task_struct, thread_group);
2395 }
2396 
2397 static inline int thread_group_empty(struct task_struct *p)
2398 {
2399 	return list_empty(&p->thread_group);
2400 }
2401 
2402 #define delay_group_leader(p) \
2403 		(thread_group_leader(p) && !thread_group_empty(p))
2404 
2405 /*
2406  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2407  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2408  * pins the final release of task.io_context.  Also protects ->cpuset and
2409  * ->cgroup.subsys[]. And ->vfork_done.
2410  *
2411  * Nests both inside and outside of read_lock(&tasklist_lock).
2412  * It must not be nested with write_lock_irq(&tasklist_lock),
2413  * neither inside nor outside.
2414  */
2415 static inline void task_lock(struct task_struct *p)
2416 {
2417 	spin_lock(&p->alloc_lock);
2418 }
2419 
2420 static inline void task_unlock(struct task_struct *p)
2421 {
2422 	spin_unlock(&p->alloc_lock);
2423 }
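
/*
 * Illustrative sketch: task_lock() stabilises the fields listed above while
 * they are inspected, e.g. peeking at another task's ->mm:
 *
 *	task_lock(p);
 *	mm = p->mm;		...may be NULL for kernel threads...
 *	task_unlock(p);
 *
 * (get_task_mm() above is the canonical helper when a reference is needed.)
 */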
2424 
2425 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2426 							unsigned long *flags);
2427 
2428 static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2429 						       unsigned long *flags)
2430 {
2431 	struct sighand_struct *ret;
2432 
2433 	ret = __lock_task_sighand(tsk, flags);
2434 	(void)__cond_lock(&tsk->sighand->siglock, ret);
2435 	return ret;
2436 }
2437 
2438 static inline void unlock_task_sighand(struct task_struct *tsk,
2439 						unsigned long *flags)
2440 {
2441 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2442 }
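
/*
 * Illustrative sketch: lock_task_sighand() returns NULL once the task has
 * no sighand left, so the result must be checked before use:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		...tsk->sighand and tsk->signal are stable here...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */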
2443 
2444 #ifdef CONFIG_CGROUPS
2445 static inline void threadgroup_change_begin(struct task_struct *tsk)
2446 {
2447 	down_read(&tsk->signal->group_rwsem);
2448 }
2449 static inline void threadgroup_change_end(struct task_struct *tsk)
2450 {
2451 	up_read(&tsk->signal->group_rwsem);
2452 }
2453 
2454 /**
2455  * threadgroup_lock - lock threadgroup
2456  * @tsk: member task of the threadgroup to lock
2457  *
2458  * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
2459  * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2460  * change ->group_leader/pid.  This is useful for cases where the threadgroup
2461  * needs to stay stable across blockable operations.
2462  *
2463  * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2464  * synchronization.  While held, no new task will be added to threadgroup
2465  * and no existing live task will have its PF_EXITING set.
2466  *
2467  * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2468  * sub-thread becomes a new leader.
2469  */
2470 static inline void threadgroup_lock(struct task_struct *tsk)
2471 {
2472 	down_write(&tsk->signal->group_rwsem);
2473 }
2474 
2475 /**
2476  * threadgroup_unlock - unlock threadgroup
2477  * @tsk: member task of the threadgroup to unlock
2478  *
2479  * Reverse threadgroup_lock().
2480  */
2481 static inline void threadgroup_unlock(struct task_struct *tsk)
2482 {
2483 	up_write(&tsk->signal->group_rwsem);
2484 }
2485 #else
2486 static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2487 static inline void threadgroup_change_end(struct task_struct *tsk) {}
2488 static inline void threadgroup_lock(struct task_struct *tsk) {}
2489 static inline void threadgroup_unlock(struct task_struct *tsk) {}
2490 #endif
2491 
2492 #ifndef __HAVE_THREAD_FUNCTIONS
2493 
2494 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
2495 #define task_stack_page(task)	((task)->stack)
2496 
2497 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2498 {
2499 	*task_thread_info(p) = *task_thread_info(org);
2500 	task_thread_info(p)->task = p;
2501 }
2502 
2503 static inline unsigned long *end_of_stack(struct task_struct *p)
2504 {
2505 	return (unsigned long *)(task_thread_info(p) + 1);
2506 }
2507 
2508 #endif
2509 
2510 static inline int object_is_on_stack(void *obj)
2511 {
2512 	void *stack = task_stack_page(current);
2513 
2514 	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2515 }
2516 
2517 extern void thread_info_cache_init(void);
2518 
2519 #ifdef CONFIG_DEBUG_STACK_USAGE
2520 static inline unsigned long stack_not_used(struct task_struct *p)
2521 {
2522 	unsigned long *n = end_of_stack(p);
2523 
2524 	do { 	/* Skip over canary */
2525 		n++;
2526 	} while (!*n);
2527 
2528 	return (unsigned long)n - (unsigned long)end_of_stack(p);
2529 }
2530 #endif
2531 
2532 /* set thread flags in other task's structures
2533  * - see asm/thread_info.h for TIF_xxxx flags available
2534  */
2535 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2536 {
2537 	set_ti_thread_flag(task_thread_info(tsk), flag);
2538 }
2539 
2540 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2541 {
2542 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2543 }
2544 
2545 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2546 {
2547 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2548 }
2549 
2550 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2551 {
2552 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2553 }
2554 
2555 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2556 {
2557 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2558 }
2559 
2560 static inline void set_tsk_need_resched(struct task_struct *tsk)
2561 {
2562 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2563 }
2564 
2565 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2566 {
2567 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2568 }
2569 
2570 static inline int test_tsk_need_resched(struct task_struct *tsk)
2571 {
2572 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2573 }
2574 
2575 static inline int restart_syscall(void)
2576 {
2577 	set_tsk_thread_flag(current, TIF_SIGPENDING);
2578 	return -ERESTARTNOINTR;
2579 }
2580 
2581 static inline int signal_pending(struct task_struct *p)
2582 {
2583 	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2584 }
2585 
2586 static inline int __fatal_signal_pending(struct task_struct *p)
2587 {
2588 	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2589 }
2590 
2591 static inline int fatal_signal_pending(struct task_struct *p)
2592 {
2593 	return signal_pending(p) && __fatal_signal_pending(p);
2594 }
2595 
2596 static inline int signal_pending_state(long state, struct task_struct *p)
2597 {
2598 	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2599 		return 0;
2600 	if (!signal_pending(p))
2601 		return 0;
2602 
2603 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2604 }
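
/*
 * Illustrative sketch: a typical interruptible wait loop bails out when a
 * signal is pending, so the syscall can be restarted or fail with -EINTR
 * ("done" stands for the caller's wakeup condition):
 *
 *	while (!done) {
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule_timeout_interruptible(HZ);
 *	}
 */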
2605 
2606 /*
2607  * cond_resched() and cond_resched_lock(): latency reduction via
2608  * explicit rescheduling in places that are safe. The return
2609  * value indicates whether a reschedule was actually done.
2610  * cond_resched_lock() will drop the spinlock before scheduling,
2611  * cond_resched_softirq() will enable bhs before scheduling.
2612  */
2613 extern int _cond_resched(void);
2614 
2615 #define cond_resched() ({			\
2616 	__might_sleep(__FILE__, __LINE__, 0);	\
2617 	_cond_resched();			\
2618 })
2619 
2620 extern int __cond_resched_lock(spinlock_t *lock);
2621 
2622 #ifdef CONFIG_PREEMPT_COUNT
2623 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2624 #else
2625 #define PREEMPT_LOCK_OFFSET	0
2626 #endif
2627 
2628 #define cond_resched_lock(lock) ({				\
2629 	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2630 	__cond_resched_lock(lock);				\
2631 })
2632 
2633 extern int __cond_resched_softirq(void);
2634 
2635 #define cond_resched_softirq() ({					\
2636 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2637 	__cond_resched_softirq();					\
2638 })
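
/*
 * Illustrative sketch: a long-running scan under a spinlock periodically
 * yields via cond_resched_lock(), which may drop and retake the lock
 * ("more_work"/"do_one_item" are hypothetical, and any state must be
 * revalidated after the lock was dropped):
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_one_item();
 *		cond_resched_lock(&lock);
 *	}
 *	spin_unlock(&lock);
 */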
2639 
2640 static inline void cond_resched_rcu(void)
2641 {
2642 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2643 	rcu_read_unlock();
2644 	cond_resched();
2645 	rcu_read_lock();
2646 #endif
2647 }
2648 
2649 /*
2650  * Does a critical section need to be broken due to another
2651  * task waiting? (This technically does not depend on CONFIG_PREEMPT,
2652  * but it is a general need for low latency.)
2653  */
2654 static inline int spin_needbreak(spinlock_t *lock)
2655 {
2656 #ifdef CONFIG_PREEMPT
2657 	return spin_is_contended(lock);
2658 #else
2659 	return 0;
2660 #endif
2661 }
2662 
2663 /*
2664  * Idle thread specific functions to determine the need_resched
2665  * polling state. We have two versions, one based on TS_POLLING in
2666  * thread_info.status and one based on TIF_POLLING_NRFLAG in
2667  * thread_info.flags.
2668  */
2669 #ifdef TS_POLLING
2670 static inline int tsk_is_polling(struct task_struct *p)
2671 {
2672 	return task_thread_info(p)->status & TS_POLLING;
2673 }
2674 static inline void __current_set_polling(void)
2675 {
2676 	current_thread_info()->status |= TS_POLLING;
2677 }
2678 
2679 static inline bool __must_check current_set_polling_and_test(void)
2680 {
2681 	__current_set_polling();
2682 
2683 	/*
2684 	 * Polling state must be visible before we test NEED_RESCHED,
2685 	 * paired with resched_task()
2686 	 */
2687 	smp_mb();
2688 
2689 	return unlikely(tif_need_resched());
2690 }
2691 
2692 static inline void __current_clr_polling(void)
2693 {
2694 	current_thread_info()->status &= ~TS_POLLING;
2695 }
2696 
2697 static inline bool __must_check current_clr_polling_and_test(void)
2698 {
2699 	__current_clr_polling();
2700 
2701 	/*
2702 	 * Polling state must be visible before we test NEED_RESCHED,
2703 	 * paired with resched_task()
2704 	 */
2705 	smp_mb();
2706 
2707 	return unlikely(tif_need_resched());
2708 }
2709 #elif defined(TIF_POLLING_NRFLAG)
2710 static inline int tsk_is_polling(struct task_struct *p)
2711 {
2712 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2713 }
2714 
2715 static inline void __current_set_polling(void)
2716 {
2717 	set_thread_flag(TIF_POLLING_NRFLAG);
2718 }
2719 
2720 static inline bool __must_check current_set_polling_and_test(void)
2721 {
2722 	__current_set_polling();
2723 
2724 	/*
2725 	 * Polling state must be visible before we test NEED_RESCHED,
2726 	 * paired with resched_task()
2727 	 *
2728 	 * XXX: assumes set/clear bit are identical barrier wise.
2729 	 */
2730 	smp_mb__after_clear_bit();
2731 
2732 	return unlikely(tif_need_resched());
2733 }
2734 
2735 static inline void __current_clr_polling(void)
2736 {
2737 	clear_thread_flag(TIF_POLLING_NRFLAG);
2738 }
2739 
2740 static inline bool __must_check current_clr_polling_and_test(void)
2741 {
2742 	__current_clr_polling();
2743 
2744 	/*
2745 	 * Polling state must be visible before we test NEED_RESCHED,
2746 	 * paired with resched_task()
2747 	 */
2748 	smp_mb__after_clear_bit();
2749 
2750 	return unlikely(tif_need_resched());
2751 }
2752 
2753 #else
2754 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2755 static inline void __current_set_polling(void) { }
2756 static inline void __current_clr_polling(void) { }
2757 
2758 static inline bool __must_check current_set_polling_and_test(void)
2759 {
2760 	return unlikely(tif_need_resched());
2761 }
2762 static inline bool __must_check current_clr_polling_and_test(void)
2763 {
2764 	return unlikely(tif_need_resched());
2765 }
2766 #endif
2767 
2768 static inline void current_clr_polling(void)
2769 {
2770 	__current_clr_polling();
2771 
2772 	/*
2773 	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
2774 	 * Once the bit is cleared, we'll get IPIs with every new
2775 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
2776 	 * fold the need_resched state.
2777 	 */
2778 	smp_mb(); /* paired with resched_task() */
2779 
2780 	preempt_fold_need_resched();
2781 }
2782 
2783 static __always_inline bool need_resched(void)
2784 {
2785 	return unlikely(tif_need_resched());
2786 }
2787 
2788 /*
2789  * Thread group CPU time accounting.
2790  */
2791 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2792 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2793 
2794 static inline void thread_group_cputime_init(struct signal_struct *sig)
2795 {
2796 	raw_spin_lock_init(&sig->cputimer.lock);
2797 }
2798 
2799 /*
2800  * Reevaluate whether the task has signals pending delivery.
2801  * Wake the task if so.
2802  * This is required every time the blocked sigset_t changes.
2803  * Callers must hold sighand->siglock.
2804  */
2805 extern void recalc_sigpending_and_wake(struct task_struct *t);
2806 extern void recalc_sigpending(void);
2807 
2808 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2809 
2810 static inline void signal_wake_up(struct task_struct *t, bool resume)
2811 {
2812 	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2813 }
2814 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2815 {
2816 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2817 }
2818 
2819 /*
2820  * Wrappers for p->thread_info->cpu access. No-op on UP.
2821  */
2822 #ifdef CONFIG_SMP
2823 
2824 static inline unsigned int task_cpu(const struct task_struct *p)
2825 {
2826 	return task_thread_info(p)->cpu;
2827 }
2828 
2829 static inline int task_node(const struct task_struct *p)
2830 {
2831 	return cpu_to_node(task_cpu(p));
2832 }
2833 
2834 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2835 
2836 #else
2837 
2838 static inline unsigned int task_cpu(const struct task_struct *p)
2839 {
2840 	return 0;
2841 }
2842 
2843 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2844 {
2845 }
2846 
2847 #endif /* CONFIG_SMP */
2848 
2849 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2850 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2851 
2852 #ifdef CONFIG_CGROUP_SCHED
2853 extern struct task_group root_task_group;
2854 #endif /* CONFIG_CGROUP_SCHED */
2855 
2856 extern int task_can_switch_user(struct user_struct *up,
2857 					struct task_struct *tsk);
2858 
2859 #ifdef CONFIG_TASK_XACCT
2860 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2861 {
2862 	tsk->ioac.rchar += amt;
2863 }
2864 
2865 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2866 {
2867 	tsk->ioac.wchar += amt;
2868 }
2869 
2870 static inline void inc_syscr(struct task_struct *tsk)
2871 {
2872 	tsk->ioac.syscr++;
2873 }
2874 
2875 static inline void inc_syscw(struct task_struct *tsk)
2876 {
2877 	tsk->ioac.syscw++;
2878 }
2879 #else
2880 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2881 {
2882 }
2883 
2884 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2885 {
2886 }
2887 
2888 static inline void inc_syscr(struct task_struct *tsk)
2889 {
2890 }
2891 
2892 static inline void inc_syscw(struct task_struct *tsk)
2893 {
2894 }
2895 #endif
2896 
2897 #ifndef TASK_SIZE_OF
2898 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2899 #endif
2900 
2901 #ifdef CONFIG_MM_OWNER
2902 extern void mm_update_next_owner(struct mm_struct *mm);
2903 extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2904 #else
2905 static inline void mm_update_next_owner(struct mm_struct *mm)
2906 {
2907 }
2908 
2909 static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2910 {
2911 }
2912 #endif /* CONFIG_MM_OWNER */
2913 
2914 static inline unsigned long task_rlimit(const struct task_struct *tsk,
2915 		unsigned int limit)
2916 {
2917 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2918 }
2919 
2920 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2921 		unsigned int limit)
2922 {
2923 	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2924 }
2925 
2926 static inline unsigned long rlimit(unsigned int limit)
2927 {
2928 	return task_rlimit(current, limit);
2929 }
2930 
2931 static inline unsigned long rlimit_max(unsigned int limit)
2932 {
2933 	return task_rlimit_max(current, limit);
2934 }
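
/*
 * Illustrative sketch: checking a request against the caller's soft limit
 * with the helpers above:
 *
 *	if (new_size > rlimit(RLIMIT_STACK))
 *		return -ENOMEM;
 */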
2935 
2936 #endif
2937